From f60435c878f0b91607f8b8d71050e61109b679a7 Mon Sep 17 00:00:00 2001
From: Lex Berezhny
Date: Sat, 3 Nov 2018 18:50:34 -0400
Subject: [PATCH] moving and refactoring electrumx into torba

---
 .gitignore | 1 +
 .travis.yml | 2 -
 setup.cfg | 2 +-
 setup.py | 25 +-
 tests/integration/test_reconnect.py | 4 +-
 tests/integration/test_transactions.py | 5 +-
 torba/cli.py | 89 +
 torba/coin/bitcoincash.py | 2 +-
 torba/coin/bitcoinsegwit.py | 2 +-
 torba/server/__init__.py | 1 +
 torba/server/block_processor.py | 713 ++++++++
 torba/server/coins.py | 2290 ++++++++++++++++++++++++
 torba/server/daemon.py | 459 +++++
 torba/server/db.py | 665 +++++++
 torba/server/enum.py | 54 +
 torba/server/env.py | 169 ++
 torba/server/env_base.py | 99 +
 torba/server/hash.py | 159 ++
 torba/server/history.py | 324 ++++
 torba/server/mempool.py | 365 ++++
 torba/server/merkle.py | 254 +++
 torba/server/peer.py | 301 ++++
 torba/server/peers.py | 510 ++++++
 torba/server/script.py | 251 +++
 torba/server/server.py | 129 ++
 torba/server/session.py | 1436 +++++++++++++++
 torba/server/storage.py | 166 ++
 torba/server/text.py | 82 +
 torba/server/tx.py | 625 +++++++
 torba/server/util.py | 359 ++++
 torba/testing/__init__.py | 1 +
 torba/testing/node.py | 344 ++++
 torba/testing/service.py | 147 ++
 torba/testing/testcase.py | 176 ++
 tox.ini | 11 +-
 35 files changed, 10200 insertions(+), 22 deletions(-)
 create mode 100644 torba/cli.py
 create mode 100644 torba/server/__init__.py
 create mode 100644 torba/server/block_processor.py
 create mode 100644 torba/server/coins.py
 create mode 100644 torba/server/daemon.py
 create mode 100644 torba/server/db.py
 create mode 100644 torba/server/enum.py
 create mode 100644 torba/server/env.py
 create mode 100644 torba/server/env_base.py
 create mode 100644 torba/server/hash.py
 create mode 100644 torba/server/history.py
 create mode 100644 torba/server/mempool.py
 create mode 100644 torba/server/merkle.py
 create mode 100644 torba/server/peer.py
 create mode 100644 torba/server/peers.py
 create mode 100644 torba/server/script.py
 create mode 100644 torba/server/server.py
 create mode 100644 torba/server/session.py
 create mode 100644 torba/server/storage.py
 create mode 100644 torba/server/text.py
 create mode 100644 torba/server/tx.py
 create mode 100644 torba/server/util.py
 create mode 100644 torba/testing/__init__.py
 create mode 100644 torba/testing/node.py
 create mode 100644 torba/testing/service.py
 create mode 100644 torba/testing/testcase.py

diff --git a/.gitignore b/.gitignore
index d7a60a79b..3a20b56c5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,6 +8,7 @@ dist/
 # testing
 .tox/
 tests/unit/bitcoin_headers
+torba/bin
 
 # cache and logs
 __pycache__/
diff --git a/.travis.yml b/.travis.yml
index ccb311a02..3169a13fc 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -22,8 +22,6 @@ jobs:
       env: TESTTYPE=unit
       install:
         - pip install tox-travis
-        - pushd .. && git clone https://github.com/lbryio/electrumx.git --branch lbryumx && popd
-        - pushd .. && git clone https://github.com/lbryio/orchstr8.git && popd
       script: tox
     - <<: *tests
      python: "3.6"
diff --git a/setup.cfg b/setup.cfg
index ab05b38df..9a037263d 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -10,7 +10,7 @@ source =
 ignore_missing_imports = True
 
 [pylint]
-ignore=words
+ignore=words,server
 max-args=10
 max-line-length=110
 good-names=T,t,n,i,j,k,x,y,s,f,d,h,c,e,op,db,tx,io,cachedproperty,log,id
diff --git a/setup.py b/setup.py
index 2531fa89e..f7e59b068 100644
--- a/setup.py
+++ b/setup.py
@@ -3,6 +3,10 @@ from setuptools import setup, find_packages
 
 import torba
 
+BASE = os.path.dirname(__file__)
+with open(os.path.join(BASE, 'README.md'), encoding='utf-8') as fh:
+    long_description = fh.read()
+
 setup(
     name='torba',
     version=torba.__version__,
@@ -10,11 +14,10 @@ setup(
     license='MIT',
     author='LBRY Inc.',
     author_email='hello@lbry.io',
-    description='Wallet library for bitcoin based currencies.',
-    long_description=open(os.path.join(os.path.dirname(__file__), 'README.md'),
-                          encoding='utf-8').read(),
+    description='Wallet client/server framework for bitcoin based currencies.',
+    long_description=long_description,
     long_description_content_type="text/markdown",
-    keywords='wallet,crypto,currency,money,bitcoin,lbry',
+    keywords='wallet,crypto,currency,money,bitcoin,electrum,electrumx',
     classifiers=(
         'Framework :: AsyncIO',
         'Intended Audience :: Developers',
@@ -23,13 +26,16 @@ setup(
         'Programming Language :: Python :: 3',
         'Operating System :: OS Independent',
         'Topic :: Internet',
+        'Topic :: Software Development :: Testing',
         'Topic :: Software Development :: Libraries :: Python Modules',
+        'Topic :: System :: Benchmark',
         'Topic :: System :: Distributed Computing',
         'Topic :: Utilities',
     ),
     packages=find_packages(exclude=('tests',)),
     python_requires='>=3.6',
     install_requires=(
+        'aiohttp',
         'aiorpcx==0.9.0',
         'coincurve',
         'pbkdf2',
@@ -38,6 +44,13 @@ setup(
     extras_require={
         'test': (
             'mock',
-        )
-    }
+            'requests',
+        ),
+        'server': (
+            'attrs',
+            'plyvel',
+            'pylru'
+        ),
+    },
+    entry_points={'console_scripts': ['torba=torba.cli:main']}
 )
diff --git a/tests/integration/test_reconnect.py b/tests/integration/test_reconnect.py
index b7a3b9d81..a2cb19d67 100644
--- a/tests/integration/test_reconnect.py
+++ b/tests/integration/test_reconnect.py
@@ -1,3 +1,4 @@
+import logging
 from asyncio import CancelledError
 
 from orchstr8.testcase import IntegrationTestCase
@@ -6,7 +7,7 @@ from torba.constants import COIN
 
 class ReconnectTests(IntegrationTestCase):
 
-    VERBOSE = False
+    VERBOSITY = logging.DEBUG
 
     async def test_connection_drop_still_receives_events_after_reconnected(self):
         address1 = await self.account.receiving.get_or_create_usable_address()
@@ -31,4 +32,3 @@ class ReconnectTests(IntegrationTestCase):
         await self.blockchain.generate(1)
         # omg, the burned cable still works! torba is fire proof!
await self.ledger.network.get_transaction(sendtxid) - diff --git a/tests/integration/test_transactions.py b/tests/integration/test_transactions.py index 8b5c7a263..99c5cd5fd 100644 --- a/tests/integration/test_transactions.py +++ b/tests/integration/test_transactions.py @@ -1,11 +1,12 @@ +import logging import asyncio -from orchstr8.testcase import IntegrationTestCase +from torba.testing import IntegrationTestCase from torba.constants import COIN class BasicTransactionTests(IntegrationTestCase): - VERBOSE = False + VERBOSITY = logging.WARNING async def test_sending_and_receiving(self): account1, account2 = self.account, self.wallet.generate_account(self.ledger) diff --git a/torba/cli.py b/torba/cli.py new file mode 100644 index 000000000..17d293737 --- /dev/null +++ b/torba/cli.py @@ -0,0 +1,89 @@ +import logging +import argparse +import asyncio +import aiohttp + +from torba.testing.node import Conductor, get_ledger_from_environment, get_blockchain_node_from_ledger +from torba.testing.service import TestingServiceAPI + + +def get_argument_parser(): + parser = argparse.ArgumentParser( + prog="torba" + ) + subparsers = parser.add_subparsers(dest='command', help='sub-command help') + + gui = subparsers.add_parser("gui", help="Start Qt GUI.") + + download = subparsers.add_parser("download", help="Download blockchain node binary.") + + start = subparsers.add_parser("start", help="Start orchstr8 service.") + start.add_argument("--blockchain", help="Start blockchain node.", action="store_true") + start.add_argument("--spv", help="Start SPV server.", action="store_true") + start.add_argument("--wallet", help="Start wallet daemon.", action="store_true") + + generate = subparsers.add_parser("generate", help="Call generate method on running orchstr8 instance.") + generate.add_argument("blocks", type=int, help="Number of blocks to generate") + + transfer = subparsers.add_parser("transfer", help="Call transfer method on running orchstr8 instance.") + return parser + + +async def run_remote_command(command, **kwargs): + async with aiohttp.ClientSession() as session: + async with session.post('http://localhost:7954/'+command, data=kwargs) as resp: + print(resp.status) + print(await resp.text()) + + +def main(): + parser = get_argument_parser() + args = parser.parse_args() + command = getattr(args, 'command', 'help') + + if command == 'gui': + from torba.workbench import main as start_app + return start_app() + + loop = asyncio.get_event_loop() + ledger = get_ledger_from_environment() + + if command == 'download': + logging.getLogger('blockchain').setLevel(logging.INFO) + get_blockchain_node_from_ledger(ledger).ensure() + + elif command == 'generate': + loop.run_until_complete(run_remote_command( + 'generate', blocks=args.blocks + )) + + elif command == 'start': + + conductor = Conductor() + if getattr(args, 'blockchain', False): + loop.run_until_complete(conductor.start_blockchain()) + if getattr(args, 'spv', False): + loop.run_until_complete(conductor.start_spv()) + if getattr(args, 'wallet', False): + loop.run_until_complete(conductor.start_wallet()) + + service = TestingServiceAPI(conductor, loop) + loop.run_until_complete(service.start()) + + try: + print('========== Orchstr8 API Service Started ========') + loop.run_forever() + except KeyboardInterrupt: + pass + finally: + loop.run_until_complete(service.stop()) + loop.run_until_complete(conductor.stop()) + + loop.close() + + else: + parser.print_help() + + +if __name__ == "__main__": + main() diff --git a/torba/coin/bitcoincash.py 
b/torba/coin/bitcoincash.py index 77e991217..29e548910 100644 --- a/torba/coin/bitcoincash.py +++ b/torba/coin/bitcoincash.py @@ -4,7 +4,7 @@ __node_bin__ = 'bitcoin-abc-0.17.2/bin' __node_url__ = ( 'https://download.bitcoinabc.org/0.17.2/linux/bitcoin-abc-0.17.2-x86_64-linux-gnu.tar.gz' ) -__electrumx__ = 'electrumx.lib.coins.BitcoinCashRegtest' +__spvserver__ = 'torba.server.coins.BitcoinCashRegtest' from binascii import unhexlify from torba.baseledger import BaseLedger diff --git a/torba/coin/bitcoinsegwit.py b/torba/coin/bitcoinsegwit.py index 167551086..13cffbd01 100644 --- a/torba/coin/bitcoinsegwit.py +++ b/torba/coin/bitcoinsegwit.py @@ -4,7 +4,7 @@ __node_bin__ = 'bitcoin-0.16.3/bin' __node_url__ = ( 'https://bitcoin.org/bin/bitcoin-core-0.16.3/bitcoin-0.16.3-x86_64-linux-gnu.tar.gz' ) -__electrumx__ = 'electrumx.lib.coins.BitcoinSegwitRegtest' +__spvserver__ = 'torba.server.coins.BitcoinSegwitRegtest' import struct from typing import Optional diff --git a/torba/server/__init__.py b/torba/server/__init__.py new file mode 100644 index 000000000..b7f2cf595 --- /dev/null +++ b/torba/server/__init__.py @@ -0,0 +1 @@ +from .server import Server diff --git a/torba/server/block_processor.py b/torba/server/block_processor.py new file mode 100644 index 000000000..a27c48737 --- /dev/null +++ b/torba/server/block_processor.py @@ -0,0 +1,713 @@ +# Copyright (c) 2016-2017, Neil Booth +# Copyright (c) 2017, the ElectrumX authors +# +# All rights reserved. +# +# See the file "LICENCE" for information about the copyright +# and warranty status of this software. + +'''Block prefetcher and chain processor.''' + + +import array +import asyncio +from struct import pack, unpack +import time +from functools import partial + +from aiorpcx import TaskGroup, run_in_thread + +import torba +from torba.server.daemon import DaemonError +from torba.server.hash import hash_to_hex_str, HASHX_LEN +from torba.server.util import chunks, class_logger +from torba.server.db import FlushData + + +class Prefetcher(object): + '''Prefetches blocks (in the forward direction only).''' + + def __init__(self, daemon, coin, blocks_event): + self.logger = class_logger(__name__, self.__class__.__name__) + self.daemon = daemon + self.coin = coin + self.blocks_event = blocks_event + self.blocks = [] + self.caught_up = False + # Access to fetched_height should be protected by the semaphore + self.fetched_height = None + self.semaphore = asyncio.Semaphore() + self.refill_event = asyncio.Event() + # The prefetched block cache size. The min cache size has + # little effect on sync time. + self.cache_size = 0 + self.min_cache_size = 10 * 1024 * 1024 + # This makes the first fetch be 10 blocks + self.ave_size = self.min_cache_size // 10 + self.polling_delay = 5 + + async def main_loop(self, bp_height): + '''Loop forever polling for more blocks.''' + await self.reset_height(bp_height) + while True: + try: + # Sleep a while if there is nothing to prefetch + await self.refill_event.wait() + if not await self._prefetch_blocks(): + await asyncio.sleep(self.polling_delay) + except DaemonError as e: + self.logger.info(f'ignoring daemon error: {e}') + + def get_prefetched_blocks(self): + '''Called by block processor when it is processing queued blocks.''' + blocks = self.blocks + self.blocks = [] + self.cache_size = 0 + self.refill_event.set() + return blocks + + async def reset_height(self, height): + '''Reset to prefetch blocks from the block processor's height. + + Used in blockchain reorganisations. 
This coroutine can be + called asynchronously to the _prefetch_blocks coroutine so we + must synchronize with a semaphore. + ''' + async with self.semaphore: + self.blocks.clear() + self.cache_size = 0 + self.fetched_height = height + self.refill_event.set() + + daemon_height = await self.daemon.height() + behind = daemon_height - height + if behind > 0: + self.logger.info('catching up to daemon height {:,d} ' + '({:,d} blocks behind)' + .format(daemon_height, behind)) + else: + self.logger.info('caught up to daemon height {:,d}' + .format(daemon_height)) + + async def _prefetch_blocks(self): + '''Prefetch some blocks and put them on the queue. + + Repeats until the queue is full or caught up. + ''' + daemon = self.daemon + daemon_height = await daemon.height() + async with self.semaphore: + while self.cache_size < self.min_cache_size: + # Try and catch up all blocks but limit to room in cache. + # Constrain fetch count to between 0 and 500 regardless; + # testnet can be lumpy. + cache_room = self.min_cache_size // self.ave_size + count = min(daemon_height - self.fetched_height, cache_room) + count = min(500, max(count, 0)) + if not count: + self.caught_up = True + return False + + first = self.fetched_height + 1 + hex_hashes = await daemon.block_hex_hashes(first, count) + if self.caught_up: + self.logger.info('new block height {:,d} hash {}' + .format(first + count-1, hex_hashes[-1])) + blocks = await daemon.raw_blocks(hex_hashes) + + assert count == len(blocks) + + # Special handling for genesis block + if first == 0: + blocks[0] = self.coin.genesis_block(blocks[0]) + self.logger.info('verified genesis block with hash {}' + .format(hex_hashes[0])) + + # Update our recent average block size estimate + size = sum(len(block) for block in blocks) + if count >= 10: + self.ave_size = size // count + else: + self.ave_size = (size + (10 - count) * self.ave_size) // 10 + + self.blocks.extend(blocks) + self.cache_size += size + self.fetched_height += count + self.blocks_event.set() + + self.refill_event.clear() + return True + + +class ChainError(Exception): + '''Raised on error processing blocks.''' + + +class BlockProcessor(object): + '''Process blocks and update the DB state to match. + + Employ a prefetcher to prefetch blocks in batches for processing. + Coordinate backing up in case of chain reorganisations. + ''' + + def __init__(self, env, db, daemon, notifications): + self.env = env + self.db = db + self.daemon = daemon + self.notifications = notifications + + self.coin = env.coin + self.blocks_event = asyncio.Event() + self.prefetcher = Prefetcher(daemon, env.coin, self.blocks_event) + self.logger = class_logger(__name__, self.__class__.__name__) + + # Meta + self.next_cache_check = 0 + self.touched = set() + self.reorg_count = 0 + + # Caches of unflushed items. + self.headers = [] + self.tx_hashes = [] + self.undo_infos = [] + + # UTXO cache + self.utxo_cache = {} + self.db_deletes = [] + + # If the lock is successfully acquired, in-memory chain state + # is consistent with self.height + self.state_lock = asyncio.Lock() + + async def run_in_thread_with_lock(self, func, *args): + # Run in a thread to prevent blocking. Shielded so that + # cancellations from shutdown don't lose work - when the task + # completes the data will be flushed and then we shut down. + # Take the state lock to be certain in-memory state is + # consistent and not being updated elsewhere. 
+ async def run_in_thread_locked(): + async with self.state_lock: + return await run_in_thread(func, *args) + return await asyncio.shield(run_in_thread_locked()) + + async def check_and_advance_blocks(self, raw_blocks): + '''Process the list of raw blocks passed. Detects and handles + reorgs. + ''' + if not raw_blocks: + return + first = self.height + 1 + blocks = [self.coin.block(raw_block, first + n) + for n, raw_block in enumerate(raw_blocks)] + headers = [block.header for block in blocks] + hprevs = [self.coin.header_prevhash(h) for h in headers] + chain = [self.tip] + [self.coin.header_hash(h) for h in headers[:-1]] + + if hprevs == chain: + start = time.time() + await self.run_in_thread_with_lock(self.advance_blocks, blocks) + await self._maybe_flush() + if not self.db.first_sync: + s = '' if len(blocks) == 1 else 's' + self.logger.info('processed {:,d} block{} in {:.1f}s' + .format(len(blocks), s, + time.time() - start)) + if self._caught_up_event.is_set(): + await self.notifications.on_block(self.touched, self.height) + self.touched = set() + elif hprevs[0] != chain[0]: + await self.reorg_chain() + else: + # It is probably possible but extremely rare that what + # bitcoind returns doesn't form a chain because it + # reorg-ed the chain as it was processing the batched + # block hash requests. Should this happen it's simplest + # just to reset the prefetcher and try again. + self.logger.warning('daemon blocks do not form a chain; ' + 'resetting the prefetcher') + await self.prefetcher.reset_height(self.height) + + async def reorg_chain(self, count=None): + '''Handle a chain reorganisation. + + Count is the number of blocks to simulate a reorg, or None for + a real reorg.''' + if count is None: + self.logger.info('chain reorg detected') + else: + self.logger.info(f'faking a reorg of {count:,d} blocks') + await self.flush(True) + + async def get_raw_blocks(last_height, hex_hashes): + heights = range(last_height, last_height - len(hex_hashes), -1) + try: + blocks = [self.db.read_raw_block(height) for height in heights] + self.logger.info(f'read {len(blocks)} blocks from disk') + return blocks + except FileNotFoundError: + return await self.daemon.raw_blocks(hex_hashes) + + def flush_backup(): + # self.touched can include other addresses which is + # harmless, but remove None. + self.touched.discard(None) + self.db.flush_backup(self.flush_data(), self.touched) + + start, last, hashes = await self.reorg_hashes(count) + # Reverse and convert to hex strings. + hashes = [hash_to_hex_str(hash) for hash in reversed(hashes)] + for hex_hashes in chunks(hashes, 50): + raw_blocks = await get_raw_blocks(last, hex_hashes) + await self.run_in_thread_with_lock(self.backup_blocks, raw_blocks) + await self.run_in_thread_with_lock(flush_backup) + last -= len(raw_blocks) + await self.prefetcher.reset_height(self.height) + + async def reorg_hashes(self, count): + '''Return a pair (start, last, hashes) of blocks to back up during a + reorg. + + The hashes are returned in order of increasing height. Start + is the height of the first hash, last of the last. 
+ ''' + start, count = await self.calc_reorg_range(count) + last = start + count - 1 + s = '' if count == 1 else 's' + self.logger.info(f'chain was reorganised replacing {count:,d} ' + f'block{s} at heights {start:,d}-{last:,d}') + + return start, last, await self.db.fs_block_hashes(start, count) + + async def calc_reorg_range(self, count): + '''Calculate the reorg range''' + + def diff_pos(hashes1, hashes2): + '''Returns the index of the first difference in the hash lists. + If both lists match returns their length.''' + for n, (hash1, hash2) in enumerate(zip(hashes1, hashes2)): + if hash1 != hash2: + return n + return len(hashes) + + if count is None: + # A real reorg + start = self.height - 1 + count = 1 + while start > 0: + hashes = await self.db.fs_block_hashes(start, count) + hex_hashes = [hash_to_hex_str(hash) for hash in hashes] + d_hex_hashes = await self.daemon.block_hex_hashes(start, count) + n = diff_pos(hex_hashes, d_hex_hashes) + if n > 0: + start += n + break + count = min(count * 2, start) + start -= count + + count = (self.height - start) + 1 + else: + start = (self.height - count) + 1 + + return start, count + + def estimate_txs_remaining(self): + # Try to estimate how many txs there are to go + daemon_height = self.daemon.cached_height() + coin = self.coin + tail_count = daemon_height - max(self.height, coin.TX_COUNT_HEIGHT) + # Damp the initial enthusiasm + realism = max(2.0 - 0.9 * self.height / coin.TX_COUNT_HEIGHT, 1.0) + return (tail_count * coin.TX_PER_BLOCK + + max(coin.TX_COUNT - self.tx_count, 0)) * realism + + # - Flushing + def flush_data(self): + '''The data for a flush. The lock must be taken.''' + assert self.state_lock.locked() + return FlushData(self.height, self.tx_count, self.headers, + self.tx_hashes, self.undo_infos, self.utxo_cache, + self.db_deletes, self.tip) + + async def flush(self, flush_utxos): + def flush(): + self.db.flush_dbs(self.flush_data(), flush_utxos, + self.estimate_txs_remaining) + await self.run_in_thread_with_lock(flush) + + async def _maybe_flush(self): + # If caught up, flush everything as client queries are + # performed on the DB. + if self._caught_up_event.is_set(): + await self.flush(True) + elif time.time() > self.next_cache_check: + flush_arg = self.check_cache_size() + if flush_arg is not None: + await self.flush(flush_arg) + self.next_cache_check = time.time() + 30 + + def check_cache_size(self): + '''Flush a cache if it gets too big.''' + # Good average estimates based on traversal of subobjects and + # requesting size from Python (see deep_getsizeof). + one_MB = 1000*1000 + utxo_cache_size = len(self.utxo_cache) * 205 + db_deletes_size = len(self.db_deletes) * 57 + hist_cache_size = self.db.history.unflushed_memsize() + # Roughly ntxs * 32 + nblocks * 42 + tx_hash_size = ((self.tx_count - self.db.fs_tx_count) * 32 + + (self.height - self.db.fs_height) * 42) + utxo_MB = (db_deletes_size + utxo_cache_size) // one_MB + hist_MB = (hist_cache_size + tx_hash_size) // one_MB + + self.logger.info('our height: {:,d} daemon: {:,d} ' + 'UTXOs {:,d}MB hist {:,d}MB' + .format(self.height, self.daemon.cached_height(), + utxo_MB, hist_MB)) + + # Flush history if it takes up over 20% of cache memory. + # Flush UTXOs once they take up 80% of cache memory. + cache_MB = self.env.cache_MB + if utxo_MB + hist_MB >= cache_MB or hist_MB >= cache_MB // 5: + return utxo_MB >= cache_MB * 4 // 5 + return None + + def advance_blocks(self, blocks): + '''Synchronously advance the blocks. 
+ + It is already verified they correctly connect onto our tip. + ''' + min_height = self.db.min_undo_height(self.daemon.cached_height()) + height = self.height + + for block in blocks: + height += 1 + undo_info = self.advance_txs(block.transactions) + if height >= min_height: + self.undo_infos.append((undo_info, height)) + self.db.write_raw_block(block.raw, height) + + headers = [block.header for block in blocks] + self.height = height + self.headers.extend(headers) + self.tip = self.coin.header_hash(headers[-1]) + + def advance_txs(self, txs): + self.tx_hashes.append(b''.join(tx_hash for tx, tx_hash in txs)) + + # Use local vars for speed in the loops + undo_info = [] + tx_num = self.tx_count + script_hashX = self.coin.hashX_from_script + s_pack = pack + put_utxo = self.utxo_cache.__setitem__ + spend_utxo = self.spend_utxo + undo_info_append = undo_info.append + update_touched = self.touched.update + hashXs_by_tx = [] + append_hashXs = hashXs_by_tx.append + + for tx, tx_hash in txs: + hashXs = [] + append_hashX = hashXs.append + tx_numb = s_pack('= len(raw_blocks) + + coin = self.coin + for raw_block in raw_blocks: + # Check and update self.tip + block = coin.block(raw_block, self.height) + header_hash = coin.header_hash(block.header) + if header_hash != self.tip: + raise ChainError('backup block {} not tip {} at height {:,d}' + .format(hash_to_hex_str(header_hash), + hash_to_hex_str(self.tip), + self.height)) + self.tip = coin.header_prevhash(block.header) + self.backup_txs(block.transactions) + self.height -= 1 + self.db.tx_counts.pop() + + self.logger.info('backed up to height {:,d}'.format(self.height)) + + def backup_txs(self, txs): + # Prevout values, in order down the block (coinbase first if present) + # undo_info is in reverse block order + undo_info = self.db.read_undo_info(self.height) + if undo_info is None: + raise ChainError('no undo information found for height {:,d}' + .format(self.height)) + n = len(undo_info) + + # Use local vars for speed in the loops + s_pack = pack + put_utxo = self.utxo_cache.__setitem__ + spend_utxo = self.spend_utxo + script_hashX = self.coin.hashX_from_script + touched = self.touched + undo_entry_len = 12 + HASHX_LEN + + for tx, tx_hash in reversed(txs): + for idx, txout in enumerate(tx.outputs): + # Spend the TX outputs. Be careful with unspendable + # outputs - we didn't save those in the first place. + hashX = script_hashX(txout.pk_script) + if hashX: + cache_value = spend_utxo(tx_hash, idx) + touched.add(cache_value[:-12]) + + # Restore the inputs + for txin in reversed(tx.inputs): + if txin.is_generation(): + continue + n -= undo_entry_len + undo_item = undo_info[n:n + undo_entry_len] + put_utxo(txin.prev_hash + s_pack(' 1: + tx_num, = unpack('False state. + first_sync = self.db.first_sync + self.db.first_sync = False + await self.flush(True) + if first_sync: + self.logger.info(f'{torba.__version__} synced to ' + f'height {self.height:,d}') + # Reopen for serving + await self.db.open_for_serving() + + async def _first_open_dbs(self): + await self.db.open_for_sync() + self.height = self.db.db_height + self.tip = self.db.db_tip + self.tx_count = self.db.db_tx_count + + # --- External API + + async def fetch_and_process_blocks(self, caught_up_event): + '''Fetch, process and index blocks from the daemon. + + Sets caught_up_event when first caught up. Flushes to disk + and shuts down cleanly if cancelled. 
+ + This is mainly because if, during initial sync ElectrumX is + asked to shut down when a large number of blocks have been + processed but not written to disk, it should write those to + disk before exiting, as otherwise a significant amount of work + could be lost. + ''' + self._caught_up_event = caught_up_event + await self._first_open_dbs() + try: + async with TaskGroup() as group: + await group.spawn(self.prefetcher.main_loop(self.height)) + await group.spawn(self._process_prefetched_blocks()) + finally: + # Shut down block processing + self.logger.info('flushing to DB for a clean shutdown...') + await self.flush(True) + + def force_chain_reorg(self, count): + '''Force a reorg of the given number of blocks. + + Returns True if a reorg is queued, false if not caught up. + ''' + if self._caught_up_event.is_set(): + self.reorg_count = count + self.blocks_event.set() + return True + return False + + +class DecredBlockProcessor(BlockProcessor): + async def calc_reorg_range(self, count): + start, count = await super().calc_reorg_range(count) + if start > 0: + # A reorg in Decred can invalidate the previous block + start -= 1 + count += 1 + return start, count + + +class NamecoinBlockProcessor(BlockProcessor): + def advance_txs(self, txs): + result = super().advance_txs(txs) + + tx_num = self.tx_count - len(txs) + script_name_hashX = self.coin.name_hashX_from_script + update_touched = self.touched.update + hashXs_by_tx = [] + append_hashXs = hashXs_by_tx.append + + for tx, tx_hash in txs: + hashXs = [] + append_hashX = hashXs.append + + # Add the new UTXOs and associate them with the name script + for idx, txout in enumerate(tx.outputs): + # Get the hashX of the name script. Ignore non-name scripts. + hashX = script_name_hashX(txout.pk_script) + if hashX: + append_hashX(hashX) + + append_hashXs(hashXs) + update_touched(hashXs) + tx_num += 1 + + self.db.history.add_unflushed(hashXs_by_tx, self.tx_count - len(txs)) + + return result diff --git a/torba/server/coins.py b/torba/server/coins.py new file mode 100644 index 000000000..af1829842 --- /dev/null +++ b/torba/server/coins.py @@ -0,0 +1,2290 @@ +# Copyright (c) 2016-2017, Neil Booth +# Copyright (c) 2017, the ElectrumX authors +# +# All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +'''Module providing coin abstraction. + +Anything coin-specific should go in this file and be subclassed where +necessary for appropriate handling. 
+''' + +from collections import namedtuple +import re +import struct +from decimal import Decimal +from hashlib import sha256 +from functools import partial +import base64 +from typing import Type, List + +import torba.server.util as util +from torba.server.hash import Base58, hash160, double_sha256, hash_to_hex_str +from torba.server.hash import HASHX_LEN, hex_str_to_hash +from torba.server.script import ScriptPubKey, OpCodes +import torba.server.tx as lib_tx +import torba.server.block_processor as block_proc +import torba.server.daemon as daemon +from torba.server.session import ElectrumX, DashElectrumX + + +Block = namedtuple("Block", "raw header transactions") +OP_RETURN = OpCodes.OP_RETURN + + +class CoinError(Exception): + '''Exception raised for coin-related errors.''' + + +class Coin(object): + '''Base class of coin hierarchy.''' + + REORG_LIMIT = 200 + # Not sure if these are coin-specific + RPC_URL_REGEX = re.compile('.+@(\\[[0-9a-fA-F:]+\\]|[^:]+)(:[0-9]+)?') + VALUE_PER_COIN = 100000000 + CHUNK_SIZE = 2016 + BASIC_HEADER_SIZE = 80 + STATIC_BLOCK_HEADERS = True + SESSIONCLS = ElectrumX + DESERIALIZER = lib_tx.Deserializer + DAEMON = daemon.Daemon + BLOCK_PROCESSOR = block_proc.BlockProcessor + HEADER_VALUES = [ + 'version', 'prev_block_hash', 'merkle_root', 'timestamp', 'bits', 'nonce' + ] + HEADER_UNPACK = struct.Struct('< I 32s 32s I I I').unpack_from + MEMPOOL_HISTOGRAM_REFRESH_SECS = 500 + XPUB_VERBYTES = bytes('????', 'utf-8') + XPRV_VERBYTES = bytes('????', 'utf-8') + ENCODE_CHECK = Base58.encode_check + DECODE_CHECK = Base58.decode_check + # Peer discovery + PEER_DEFAULT_PORTS = {'t': '50001', 's': '50002'} + PEERS: List[str] = [] + + @classmethod + def lookup_coin_class(cls, name, net): + '''Return a coin class given name and network. + + Raise an exception if unrecognised.''' + req_attrs = ['TX_COUNT', 'TX_COUNT_HEIGHT', 'TX_PER_BLOCK'] + for coin in util.subclasses(Coin): + if (coin.NAME.lower() == name.lower() and + coin.NET.lower() == net.lower()): + coin_req_attrs = req_attrs.copy() + missing = [attr for attr in coin_req_attrs + if not hasattr(coin, attr)] + if missing: + raise CoinError('coin {} missing {} attributes' + .format(name, missing)) + return coin + raise CoinError('unknown coin {} and network {} combination' + .format(name, net)) + + @classmethod + def sanitize_url(cls, url): + # Remove surrounding ws and trailing /s + url = url.strip().rstrip('/') + match = cls.RPC_URL_REGEX.match(url) + if not match: + raise CoinError('invalid daemon URL: "{}"'.format(url)) + if match.groups()[1] is None: + url += ':{:d}'.format(cls.RPC_PORT) + if not url.startswith('http://') and not url.startswith('https://'): + url = 'http://' + url + return url + '/' + + @classmethod + def genesis_block(cls, block): + '''Check the Genesis block is the right one for this coin. + + Return the block less its unspendable coinbase. + ''' + header = cls.block_header(block, 0) + header_hex_hash = hash_to_hex_str(cls.header_hash(header)) + if header_hex_hash != cls.GENESIS_HASH: + raise CoinError('genesis block has hash {} expected {}' + .format(header_hex_hash, cls.GENESIS_HASH)) + + return header + bytes(1) + + @classmethod + def hashX_from_script(cls, script): + '''Returns a hashX from a script, or None if the script is provably + unspendable so the output can be dropped. 
+ ''' + if script and script[0] == OP_RETURN: + return None + return sha256(script).digest()[:HASHX_LEN] + + @staticmethod + def lookup_xverbytes(verbytes): + '''Return a (is_xpub, coin_class) pair given xpub/xprv verbytes.''' + # Order means BTC testnet will override NMC testnet + for coin in util.subclasses(Coin): + if verbytes == coin.XPUB_VERBYTES: + return True, coin + if verbytes == coin.XPRV_VERBYTES: + return False, coin + raise CoinError('version bytes unrecognised') + + @classmethod + def address_to_hashX(cls, address): + '''Return a hashX given a coin address.''' + return cls.hashX_from_script(cls.pay_to_address_script(address)) + + @classmethod + def P2PKH_address_from_hash160(cls, hash160): + '''Return a P2PKH address given a public key.''' + assert len(hash160) == 20 + return cls.ENCODE_CHECK(cls.P2PKH_VERBYTE + hash160) + + @classmethod + def P2PKH_address_from_pubkey(cls, pubkey): + '''Return a coin address given a public key.''' + return cls.P2PKH_address_from_hash160(hash160(pubkey)) + + @classmethod + def P2SH_address_from_hash160(cls, hash160): + '''Return a coin address given a hash160.''' + assert len(hash160) == 20 + return cls.ENCODE_CHECK(cls.P2SH_VERBYTES[0] + hash160) + + @classmethod + def hash160_to_P2PKH_script(cls, hash160): + return ScriptPubKey.P2PKH_script(hash160) + + @classmethod + def hash160_to_P2PKH_hashX(cls, hash160): + return cls.hashX_from_script(cls.hash160_to_P2PKH_script(hash160)) + + @classmethod + def pay_to_address_script(cls, address): + '''Return a pubkey script that pays to a pubkey hash. + + Pass the address (either P2PKH or P2SH) in base58 form. + ''' + raw = cls.DECODE_CHECK(address) + + # Require version byte(s) plus hash160. + verbyte = -1 + verlen = len(raw) - 20 + if verlen > 0: + verbyte, hash160 = raw[:verlen], raw[verlen:] + + if verbyte == cls.P2PKH_VERBYTE: + return cls.hash160_to_P2PKH_script(hash160) + if verbyte in cls.P2SH_VERBYTES: + return ScriptPubKey.P2SH_script(hash160) + + raise CoinError('invalid address: {}'.format(address)) + + @classmethod + def privkey_WIF(cls, privkey_bytes, compressed): + '''Return the private key encoded in Wallet Import Format.''' + payload = bytearray(cls.WIF_BYTE) + privkey_bytes + if compressed: + payload.append(0x01) + return cls.ENCODE_CHECK(payload) + + @classmethod + def header_hash(cls, header): + '''Given a header return hash''' + return double_sha256(header) + + @classmethod + def header_prevhash(cls, header): + '''Given a header return previous hash''' + return header[4:36] + + @classmethod + def static_header_offset(cls, height): + '''Given a header height return its offset in the headers file. 
+ + If header sizes change at some point, this is the only code + that needs updating.''' + assert cls.STATIC_BLOCK_HEADERS + return height * cls.BASIC_HEADER_SIZE + + @classmethod + def static_header_len(cls, height): + '''Given a header height return its length.''' + return (cls.static_header_offset(height + 1) + - cls.static_header_offset(height)) + + @classmethod + def block_header(cls, block, height): + '''Returns the block header given a block and its height.''' + return block[:cls.static_header_len(height)] + + @classmethod + def block(cls, raw_block, height): + '''Return a Block namedtuple given a raw block and its height.''' + header = cls.block_header(raw_block, height) + txs = cls.DESERIALIZER(raw_block, start=len(header)).read_tx_block() + return Block(raw_block, header, txs) + + @classmethod + def decimal_value(cls, value): + '''Return the number of standard coin units as a Decimal given a + quantity of smallest units. + + For example 1 BTC is returned for 100 million satoshis. + ''' + return Decimal(value) / cls.VALUE_PER_COIN + + @classmethod + def electrum_header(cls, header, height): + h = dict(zip(cls.HEADER_VALUES, cls.HEADER_UNPACK(header))) + # Add the height that is not present in the header itself + h['block_height'] = height + # Convert bytes to str + h['prev_block_hash'] = hash_to_hex_str(h['prev_block_hash']) + h['merkle_root'] = hash_to_hex_str(h['merkle_root']) + return h + + +class AuxPowMixin(object): + STATIC_BLOCK_HEADERS = False + DESERIALIZER = lib_tx.DeserializerAuxPow + + @classmethod + def header_hash(cls, header): + '''Given a header return hash''' + return double_sha256(header[:cls.BASIC_HEADER_SIZE]) + + @classmethod + def block_header(cls, block, height): + '''Return the AuxPow block header bytes''' + deserializer = cls.DESERIALIZER(block) + return deserializer.read_header(height, cls.BASIC_HEADER_SIZE) + + +class EquihashMixin(object): + STATIC_BLOCK_HEADERS = False + BASIC_HEADER_SIZE = 140 # Excluding Equihash solution + DESERIALIZER = lib_tx.DeserializerEquihash + HEADER_VALUES = ['version', 'prev_block_hash', 'merkle_root', 'reserved', + 'timestamp', 'bits', 'nonce'] + HEADER_UNPACK = struct.Struct('< I 32s 32s 32s I I 32s').unpack_from + + @classmethod + def electrum_header(cls, header, height): + h = dict(zip(cls.HEADER_VALUES, cls.HEADER_UNPACK(header))) + # Add the height that is not present in the header itself + h['block_height'] = height + # Convert bytes to str + h['prev_block_hash'] = hash_to_hex_str(h['prev_block_hash']) + h['merkle_root'] = hash_to_hex_str(h['merkle_root']) + h['reserved'] = hash_to_hex_str(h['reserved']) + h['nonce'] = hash_to_hex_str(h['nonce']) + return h + + @classmethod + def block_header(cls, block, height): + '''Return the block header bytes''' + deserializer = cls.DESERIALIZER(block) + return deserializer.read_header(height, cls.BASIC_HEADER_SIZE) + + +class ScryptMixin(object): + + DESERIALIZER = lib_tx.DeserializerTxTime + HEADER_HASH = None + + @classmethod + def header_hash(cls, header): + '''Given a header return the hash.''' + if cls.HEADER_HASH is None: + import scrypt + cls.HEADER_HASH = lambda x: scrypt.hash(x, x, 1024, 1, 1, 32) + + version, = util.unpack_le_uint32_from(header) + if version > 6: + return super().header_hash(header) + else: + return cls.HEADER_HASH(header) + + +class KomodoMixin(object): + P2PKH_VERBYTE = bytes.fromhex("3C") + P2SH_VERBYTES = [bytes.fromhex("55")] + WIF_BYTE = bytes.fromhex("BC") + GENESIS_HASH = ('027e3758c3a65b12aa1046462b486d0a' + 
'63bfa1beae327897f56c5cfb7daaae71') + DESERIALIZER = lib_tx.DeserializerZcash + + +class BitcoinMixin(object): + SHORTNAME = "BTC" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488b21e") + XPRV_VERBYTES = bytes.fromhex("0488ade4") + P2PKH_VERBYTE = bytes.fromhex("00") + P2SH_VERBYTES = [bytes.fromhex("05")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('000000000019d6689c085ae165831e93' + '4ff763ae46a2a6c172b3f1b60a8ce26f') + RPC_PORT = 8332 + + +class HOdlcoin(Coin): + NAME = "HOdlcoin" + SHORTNAME = "HODLC" + NET = "mainnet" + BASIC_HEADER_SIZE = 88 + P2PKH_VERBYTE = bytes.fromhex("28") + P2SH_VERBYTES = [bytes.fromhex("05")] + WIF_BYTE = bytes.fromhex("a8") + GENESIS_HASH = ('008872e5582924544e5c707ee4b839bb' + '82c28a9e94e917c94b40538d5658c04b') + DESERIALIZER = lib_tx.DeserializerSegWit + TX_COUNT = 258858 + TX_COUNT_HEIGHT = 382138 + TX_PER_BLOCK = 5 + + +class BitcoinCash(BitcoinMixin, Coin): + NAME = "BitcoinCash" + SHORTNAME = "BCH" + TX_COUNT = 246362688 + TX_COUNT_HEIGHT = 511484 + TX_PER_BLOCK = 400 + PEERS = [ + 'electroncash.cascharia.com s50002', + 'bch.electrumx.cash s t', + 'bccarihace4jdcnt.onion t52001 s52002', + 'abc1.hsmiths.com t60001 s60002', + 'electroncash.checksum0.com s t', + 'electrumx-cash.1209k.com s t', + 'electrum.leblancnet.us t50011 s50012', + 'electroncash.dk s t', + 'electrum.imaginary.cash s t', + ] + + +class BitcoinSegwit(BitcoinMixin, Coin): + NAME = "BitcoinSegwit" + DESERIALIZER = lib_tx.DeserializerSegWit + MEMPOOL_HISTOGRAM_REFRESH_SECS = 120 + TX_COUNT = 318337769 + TX_COUNT_HEIGHT = 524213 + TX_PER_BLOCK = 1400 + PEERS = [ + 'btc.smsys.me s995', + 'E-X.not.fyi s t', + 'elec.luggs.co s443', + 'electrum.vom-stausee.de s t', + 'electrum3.hachre.de s t', + 'electrum.hsmiths.com s t', + 'helicarrier.bauerj.eu s t', + 'hsmiths4fyqlw5xw.onion s t', + 'luggscoqbymhvnkp.onion t80', + 'ozahtqwp25chjdjd.onion s t', + 'node.arihanc.com s t', + 'arihancckjge66iv.onion s t', + ] + + +class BitcoinGold(EquihashMixin, BitcoinMixin, Coin): + CHUNK_SIZE = 252 + NAME = "BitcoinGold" + SHORTNAME = "BTG" + FORK_HEIGHT = 491407 + P2PKH_VERBYTE = bytes.fromhex("26") + P2SH_VERBYTES = [bytes.fromhex("17")] + DESERIALIZER = lib_tx.DeserializerEquihashSegWit + TX_COUNT = 265026255 + TX_COUNT_HEIGHT = 499923 + TX_PER_BLOCK = 50 + REORG_LIMIT = 1000 + RPC_PORT = 8338 + PEERS = [ + 'electrumx-eu.bitcoingold.org s50002 t50001', + 'electrumx-us.bitcoingold.org s50002 t50001', + 'electrumx-eu.btcgpu.org s50002 t50001', + 'electrumx-us.btcgpu.org s50002 t50001' + ] + + @classmethod + def header_hash(cls, header): + '''Given a header return hash''' + height, = util.unpack_le_uint32_from(header, 68) + if height >= cls.FORK_HEIGHT: + return double_sha256(header) + else: + return double_sha256(header[:68] + header[100:112]) + + @classmethod + def electrum_header(cls, header, height): + h = super().electrum_header(header, height) + h['reserved'] = hash_to_hex_str(header[72:100]) + h['solution'] = hash_to_hex_str(header[140:]) + return h + + +class BitcoinGoldTestnet(BitcoinGold): + FORK_HEIGHT = 1 + SHORTNAME = "TBTG" + XPUB_VERBYTES = bytes.fromhex("043587CF") + XPRV_VERBYTES = bytes.fromhex("04358394") + P2PKH_VERBYTE = bytes.fromhex("6F") + P2SH_VERBYTES = [bytes.fromhex("C4")] + WIF_BYTE = bytes.fromhex("EF") + TX_COUNT = 0 + TX_COUNT_HEIGHT = 1 + NET = 'testnet' + RPC_PORT = 18338 + GENESIS_HASH = ('00000000e0781ebe24b91eedc293adfe' + 'a2f557b53ec379e78959de3853e6f9f6') + PEERS = [ + 'test-node1.bitcoingold.org s50002', + 'test-node2.bitcoingold.org s50002', + 
'test-node3.bitcoingold.org s50002', + 'test-node1.btcgpu.org s50002', + 'test-node2.btcgpu.org s50002', + 'test-node3.btcgpu.org s50002' + ] + + +class BitcoinGoldRegtest(BitcoinGold): + FORK_HEIGHT = 2000 + SHORTNAME = "TBTG" + XPUB_VERBYTES = bytes.fromhex("043587CF") + XPRV_VERBYTES = bytes.fromhex("04358394") + P2PKH_VERBYTE = bytes.fromhex("6F") + P2SH_VERBYTES = [bytes.fromhex("C4")] + WIF_BYTE = bytes.fromhex("EF") + TX_COUNT = 0 + TX_COUNT_HEIGHT = 1 + NET = 'regtest' + RPC_PORT = 18444 + GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328' + 'bf5beb436012afca590b1a11466e2206') + PEERS: List[str] = [] + + +class Emercoin(Coin): + NAME = "Emercoin" + SHORTNAME = "EMC" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488b21e") + XPRV_VERBYTES = bytes.fromhex("0488ade4") + P2PKH_VERBYTE = bytes.fromhex("21") + P2SH_VERBYTES = [bytes.fromhex("5c")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('00000000bcccd459d036a588d1008fce' + '8da3754b205736f32ddfd35350e84c2d') + TX_COUNT = 217380620 + TX_COUNT_HEIGHT = 464000 + TX_PER_BLOCK = 1700 + VALUE_PER_COIN = 1000000 + RPC_PORT = 6662 + + DESERIALIZER = lib_tx.DeserializerTxTimeAuxPow + + PEERS: List[str] = [] + + @classmethod + def block_header(cls, block, height): + '''Returns the block header given a block and its height.''' + deserializer = cls.DESERIALIZER(block) + + if deserializer.is_merged_block(): + return deserializer.read_header(height, cls.BASIC_HEADER_SIZE) + return block[:cls.static_header_len(height)] + + @classmethod + def header_hash(cls, header): + '''Given a header return hash''' + return double_sha256(header[:cls.BASIC_HEADER_SIZE]) + + +class BitcoinTestnetMixin(object): + SHORTNAME = "XTN" + NET = "testnet" + XPUB_VERBYTES = bytes.fromhex("043587cf") + XPRV_VERBYTES = bytes.fromhex("04358394") + P2PKH_VERBYTE = bytes.fromhex("6f") + P2SH_VERBYTES = [bytes.fromhex("c4")] + WIF_BYTE = bytes.fromhex("ef") + GENESIS_HASH = ('000000000933ea01ad0ee984209779ba' + 'aec3ced90fa3f408719526f8d77f4943') + REORG_LIMIT = 8000 + TX_COUNT = 12242438 + TX_COUNT_HEIGHT = 1035428 + TX_PER_BLOCK = 21 + RPC_PORT = 18332 + PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'} + + +class BitcoinCashTestnet(BitcoinTestnetMixin, Coin): + '''Bitcoin Testnet for Bitcoin Cash daemons.''' + NAME = "BitcoinCash" + PEERS = [ + 'electrum-testnet-abc.criptolayer.net s50112', + 'bchtestnet.arihanc.com t53001 s53002', + 'ciiattqkgzebpp6jofjbrkhvhwmgnsfoayljdcrve2p3qmkbv3duaoyd.onion ' + 't53001 s53002', + ] + + +class BitcoinCashRegtest(BitcoinCashTestnet): + NET = "regtest" + GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328' + 'bf5beb436012afca590b1a11466e2206') + PEERS: List[str] = [] + TX_COUNT = 1 + TX_COUNT_HEIGHT = 1 + + +class BitcoinSegwitTestnet(BitcoinTestnetMixin, Coin): + '''Bitcoin Testnet for Core bitcoind >= 0.13.1.''' + NAME = "BitcoinSegwit" + DESERIALIZER = lib_tx.DeserializerSegWit + PEERS = [ + 'electrum.akinbo.org s t', + 'he36kyperp3kbuxu.onion s t', + 'testnet.hsmiths.com t53011 s53012', + 'hsmithsxurybd7uh.onion t53011 s53012', + 'testnetnode.arihanc.com s t', + 'w3e2orjpiiv2qwem3dw66d7c4krink4nhttngkylglpqe5r22n6n5wid.onion s t', + 'testnet.qtornado.com s t', + ] + + +class BitcoinSegwitRegtest(BitcoinSegwitTestnet): + NAME = "BitcoinSegwit" + NET = "regtest" + GENESIS_HASH = ('0f9188f13cb7b2c71f2a335e3a4fc328' + 'bf5beb436012afca590b1a11466e2206') + PEERS: List[str] = [] + TX_COUNT = 1 + TX_COUNT_HEIGHT = 1 + + +class BitcoinNolnet(BitcoinCash): + '''Bitcoin Unlimited nolimit testnet.''' + NET = "nolnet" + GENESIS_HASH = 
('0000000057e31bd2066c939a63b7b862' + '3bd0f10d8c001304bdfc1a7902ae6d35') + PEERS: List[str] = [] + REORG_LIMIT = 8000 + TX_COUNT = 583589 + TX_COUNT_HEIGHT = 8617 + TX_PER_BLOCK = 50 + RPC_PORT = 28332 + PEER_DEFAULT_PORTS = {'t': '52001', 's': '52002'} + + +class Litecoin(Coin): + NAME = "Litecoin" + SHORTNAME = "LTC" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488b21e") + XPRV_VERBYTES = bytes.fromhex("0488ade4") + P2PKH_VERBYTE = bytes.fromhex("30") + P2SH_VERBYTES = [bytes.fromhex("32"), bytes.fromhex("05")] + WIF_BYTE = bytes.fromhex("b0") + GENESIS_HASH = ('12a765e31ffd4059bada1e25190f6e98' + 'c99d9714d334efa41a195a7e7e04bfe2') + DESERIALIZER = lib_tx.DeserializerSegWit + TX_COUNT = 8908766 + TX_COUNT_HEIGHT = 1105256 + TX_PER_BLOCK = 10 + RPC_PORT = 9332 + REORG_LIMIT = 800 + PEERS = [ + 'elec.luggs.co s444', + 'electrum-ltc.bysh.me s t', + 'electrum-ltc.ddns.net s t', + 'electrum-ltc.wilv.in s t', + 'electrum.cryptomachine.com p1000 s t', + 'electrum.ltc.xurious.com s t', + 'eywr5eubdbbe2laq.onion s50008 t50007', + ] + + +class LitecoinTestnet(Litecoin): + SHORTNAME = "XLT" + NET = "testnet" + XPUB_VERBYTES = bytes.fromhex("043587cf") + XPRV_VERBYTES = bytes.fromhex("04358394") + P2PKH_VERBYTE = bytes.fromhex("6f") + P2SH_VERBYTES = [bytes.fromhex("3a"), bytes.fromhex("c4")] + WIF_BYTE = bytes.fromhex("ef") + GENESIS_HASH = ('4966625a4b2851d9fdee139e56211a0d' + '88575f59ed816ff5e6a63deb4e3e29a0') + TX_COUNT = 21772 + TX_COUNT_HEIGHT = 20800 + TX_PER_BLOCK = 2 + RPC_PORT = 19332 + REORG_LIMIT = 4000 + PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'} + PEERS = [ + 'electrum-ltc.bysh.me s t', + 'electrum.ltc.xurious.com s t', + ] + + +class Viacoin(AuxPowMixin, Coin): + NAME = "Viacoin" + SHORTNAME = "VIA" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("47") + P2SH_VERBYTES = [bytes.fromhex("21")] + WIF_BYTE = bytes.fromhex("c7") + GENESIS_HASH = ('4e9b54001f9976049830128ec0331515' + 'eaabe35a70970d79971da1539a400ba1') + TX_COUNT = 113638 + TX_COUNT_HEIGHT = 3473674 + TX_PER_BLOCK = 30 + RPC_PORT = 5222 + REORG_LIMIT = 5000 + DESERIALIZER: Type = lib_tx.DeserializerAuxPowSegWit + PEERS = [ + 'vialectrum.bitops.me s t', + 'server.vialectrum.org s t', + 'vialectrum.viacoin.net s t', + 'viax1.bitops.me s t', + ] + + +class ViacoinTestnet(Viacoin): + SHORTNAME = "TVI" + NET = "testnet" + P2PKH_VERBYTE = bytes.fromhex("7f") + P2SH_VERBYTES = [bytes.fromhex("c4")] + WIF_BYTE = bytes.fromhex("ff") + GENESIS_HASH = ('00000007199508e34a9ff81e6ec0c477' + 'a4cccff2a4767a8eee39c11db367b008') + RPC_PORT = 25222 + REORG_LIMIT = 2500 + PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'} + PEERS = [ + 'vialectrum.bysh.me s t', + ] + + +class ViacoinTestnetSegWit(ViacoinTestnet): + NET = "testnet-segwit" + DESERIALIZER = lib_tx.DeserializerSegWit + + +# Source: namecoin.org +class Namecoin(AuxPowMixin, Coin): + NAME = "Namecoin" + SHORTNAME = "NMC" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("d7dd6370") + XPRV_VERBYTES = bytes.fromhex("d7dc6e31") + P2PKH_VERBYTE = bytes.fromhex("34") + P2SH_VERBYTES = [bytes.fromhex("0d")] + WIF_BYTE = bytes.fromhex("e4") + GENESIS_HASH = ('000000000062b72c5e2ceb45fbc8587e' + '807c155b0da735e6483dfba2f0a9c770') + TX_COUNT = 4415768 + TX_COUNT_HEIGHT = 329065 + TX_PER_BLOCK = 10 + PEERS = [ + 'elec.luggs.co s446', + ] + BLOCK_PROCESSOR = block_proc.NamecoinBlockProcessor + + @classmethod + def split_name_script(cls, script): + from torba.server.script import _match_ops, Script, ScriptError + + try: + ops = Script.get_ops(script) + except ScriptError: + 
return None, script + + match = _match_ops + + # Name opcodes + OP_NAME_NEW = OpCodes.OP_1 + OP_NAME_FIRSTUPDATE = OpCodes.OP_2 + OP_NAME_UPDATE = OpCodes.OP_3 + + # Opcode sequences for name operations + NAME_NEW_OPS = [OP_NAME_NEW, -1, OpCodes.OP_2DROP] + NAME_FIRSTUPDATE_OPS = [OP_NAME_FIRSTUPDATE, -1, -1, -1, + OpCodes.OP_2DROP, OpCodes.OP_2DROP] + NAME_UPDATE_OPS = [OP_NAME_UPDATE, -1, -1, OpCodes.OP_2DROP, + OpCodes.OP_DROP] + + name_script_op_count = None + name_pushdata = None + + # Detect name operations; determine count of opcodes. + # Also extract the name field -- we might use that for something in a + # future version. + if match(ops[:len(NAME_NEW_OPS)], NAME_NEW_OPS): + name_script_op_count = len(NAME_NEW_OPS) + elif match(ops[:len(NAME_FIRSTUPDATE_OPS)], NAME_FIRSTUPDATE_OPS): + name_script_op_count = len(NAME_FIRSTUPDATE_OPS) + name_pushdata = ops[1] + elif match(ops[:len(NAME_UPDATE_OPS)], NAME_UPDATE_OPS): + name_script_op_count = len(NAME_UPDATE_OPS) + name_pushdata = ops[1] + + if name_script_op_count is None: + return None, script + + # Find the end position of the name data + n = 0 + for i in range(name_script_op_count): + # Content of this loop is copied from Script.get_ops's loop + op = script[n] + n += 1 + + if op <= OpCodes.OP_PUSHDATA4: + # Raw bytes follow + if op < OpCodes.OP_PUSHDATA1: + dlen = op + elif op == OpCodes.OP_PUSHDATA1: + dlen = script[n] + n += 1 + elif op == OpCodes.OP_PUSHDATA2: + dlen, = struct.unpack(' len(script): + raise IndexError + op = (op, script[n:n + dlen]) + n += dlen + # Strip the name data to yield the address script + address_script = script[n:] + + if name_pushdata is None: + return None, address_script + + normalized_name_op_script = bytearray() + normalized_name_op_script.append(OP_NAME_UPDATE) + normalized_name_op_script.extend(Script.push_data(name_pushdata[1])) + normalized_name_op_script.extend(Script.push_data(bytes([]))) + normalized_name_op_script.append(OpCodes.OP_2DROP) + normalized_name_op_script.append(OpCodes.OP_DROP) + normalized_name_op_script.append(OpCodes.OP_RETURN) + + return bytes(normalized_name_op_script), address_script + + @classmethod + def hashX_from_script(cls, script): + name_op_script, address_script = cls.split_name_script(script) + + return super().hashX_from_script(address_script) + + @classmethod + def address_from_script(cls, script): + name_op_script, address_script = cls.split_name_script(script) + + return super().address_from_script(address_script) + + @classmethod + def name_hashX_from_script(cls, script): + name_op_script, address_script = cls.split_name_script(script) + + if name_op_script is None: + return None + + return super().hashX_from_script(name_op_script) + + +class NamecoinTestnet(Namecoin): + NAME = "Namecoin" + SHORTNAME = "XNM" + NET = "testnet" + P2PKH_VERBYTE = bytes.fromhex("6f") + P2SH_VERBYTES = [bytes.fromhex("c4")] + WIF_BYTE = bytes.fromhex("ef") + GENESIS_HASH = ('00000007199508e34a9ff81e6ec0c477' + 'a4cccff2a4767a8eee39c11db367b008') + + +class Dogecoin(AuxPowMixin, Coin): + NAME = "Dogecoin" + SHORTNAME = "DOGE" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("02facafd") + XPRV_VERBYTES = bytes.fromhex("02fac398") + P2PKH_VERBYTE = bytes.fromhex("1e") + P2SH_VERBYTES = [bytes.fromhex("16")] + WIF_BYTE = bytes.fromhex("9e") + GENESIS_HASH = ('1a91e3dace36e2be3bf030a65679fe82' + '1aa1d6ef92e7c9902eb318182c355691') + TX_COUNT = 27583427 + TX_COUNT_HEIGHT = 1604979 + TX_PER_BLOCK = 20 + REORG_LIMIT = 2000 + + +class DogecoinTestnet(Dogecoin): + NAME = "Dogecoin" + 
SHORTNAME = "XDT" + NET = "testnet" + P2PKH_VERBYTE = bytes.fromhex("71") + P2SH_VERBYTES = [bytes.fromhex("c4")] + WIF_BYTE = bytes.fromhex("f1") + GENESIS_HASH = ('bb0a78264637406b6360aad926284d54' + '4d7049f45189db5664f3c4d07350559e') + + +# Source: https://github.com/motioncrypto/motion +class Motion(Coin): + NAME = "Motion" + SHORTNAME = "XMN" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488B21E") + XPRV_VERBYTES = bytes.fromhex("0488ADE4") + GENESIS_HASH = ('000001e9dc60dd2618e91f7b90141349' + '22c374496b61c1a272519b1c39979d78') + P2PKH_VERBYTE = bytes.fromhex("32") + P2SH_VERBYTES = [bytes.fromhex("12")] + WIF_BYTE = bytes.fromhex("80") + TX_COUNT_HEIGHT = 54353 + TX_COUNT = 92701 + TX_PER_BLOCK = 4 + RPC_PORT = 3385 + SESSIONCLS = DashElectrumX + DAEMON = daemon.DashDaemon + + @classmethod + def header_hash(cls, header): + '''Given a header return the hash.''' + import x16r_hash + return x16r_hash.getPoWHash(header) + + +# Source: https://github.com/dashpay/dash +class Dash(Coin): + NAME = "Dash" + SHORTNAME = "DASH" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("02fe52cc") + XPRV_VERBYTES = bytes.fromhex("02fe52f8") + GENESIS_HASH = ('00000ffd590b1485b3caadc19b22e637' + '9c733355108f107a430458cdf3407ab6') + P2PKH_VERBYTE = bytes.fromhex("4c") + P2SH_VERBYTES = [bytes.fromhex("10")] + WIF_BYTE = bytes.fromhex("cc") + TX_COUNT_HEIGHT = 569399 + TX_COUNT = 2157510 + TX_PER_BLOCK = 4 + RPC_PORT = 9998 + PEERS = [ + 'electrum.dash.org s t', + 'electrum.masternode.io s t', + 'electrum-drk.club s t', + 'dashcrypto.space s t', + 'electrum.dash.siampm.com s t', + 'wl4sfwq2hwxnodof.onion s t', + ] + SESSIONCLS = DashElectrumX + DAEMON = daemon.DashDaemon + + @classmethod + def header_hash(cls, header): + '''Given a header return the hash.''' + import x11_hash + return x11_hash.getPoWHash(header) + + +class DashTestnet(Dash): + SHORTNAME = "tDASH" + NET = "testnet" + XPUB_VERBYTES = bytes.fromhex("3a805837") + XPRV_VERBYTES = bytes.fromhex("3a8061a0") + GENESIS_HASH = ('00000bafbc94add76cb75e2ec9289483' + '7288a481e5c005f6563d91623bf8bc2c') + P2PKH_VERBYTE = bytes.fromhex("8c") + P2SH_VERBYTES = [bytes.fromhex("13")] + WIF_BYTE = bytes.fromhex("ef") + TX_COUNT_HEIGHT = 101619 + TX_COUNT = 132681 + TX_PER_BLOCK = 1 + RPC_PORT = 19998 + PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'} + PEERS = [ + 'electrum.dash.siampm.com s t', + 'dasht.random.re s54002 t54001', + ] + + +class Argentum(AuxPowMixin, Coin): + NAME = "Argentum" + SHORTNAME = "ARG" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("17") + P2SH_VERBYTES = [bytes.fromhex("05")] + WIF_BYTE = bytes.fromhex("97") + GENESIS_HASH = ('88c667bc63167685e4e4da058fffdfe8' + 'e007e5abffd6855de52ad59df7bb0bb2') + TX_COUNT = 2263089 + TX_COUNT_HEIGHT = 2050260 + TX_PER_BLOCK = 2000 + RPC_PORT = 13581 + + +class ArgentumTestnet(Argentum): + SHORTNAME = "XRG" + NET = "testnet" + P2PKH_VERBYTE = bytes.fromhex("6f") + P2SH_VERBYTES = [bytes.fromhex("c4")] + WIF_BYTE = bytes.fromhex("ef") + REORG_LIMIT = 2000 + + +class DigiByte(Coin): + NAME = "DigiByte" + SHORTNAME = "DGB" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("1E") + P2SH_VERBYTES = [bytes.fromhex("05")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('7497ea1b465eb39f1c8f507bc877078f' + 'e016d6fcb6dfad3a64c98dcc6e1e8496') + DESERIALIZER = lib_tx.DeserializerSegWit + TX_COUNT = 1046018 + TX_COUNT_HEIGHT = 1435000 + TX_PER_BLOCK = 1000 + RPC_PORT = 12022 + + +class DigiByteTestnet(DigiByte): + NET = "testnet" + P2PKH_VERBYTE = bytes.fromhex("6f") + P2SH_VERBYTES = 
[bytes.fromhex("c4")] + WIF_BYTE = bytes.fromhex("ef") + GENESIS_HASH = ('b5dca8039e300198e5fe7cd23bdd1728' + 'e2a444af34c447dbd0916fa3430a68c2') + RPC_PORT = 15022 + REORG_LIMIT = 2000 + + +class FairCoin(Coin): + NAME = "FairCoin" + SHORTNAME = "FAIR" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("5f") + P2SH_VERBYTES = [bytes.fromhex("24")] + WIF_BYTE = bytes.fromhex("df") + GENESIS_HASH = ('beed44fa5e96150d95d56ebd5d262578' + '1825a9407a5215dd7eda723373a0a1d7') + BASIC_HEADER_SIZE = 108 + HEADER_VALUES = ['version', 'prev_block_hash', 'merkle_root', + 'payload_hash', 'timestamp', 'creatorId'] + HEADER_UNPACK = struct.Struct('< I 32s 32s 32s I I').unpack_from + TX_COUNT = 505 + TX_COUNT_HEIGHT = 470 + TX_PER_BLOCK = 1 + RPC_PORT = 40405 + PEER_DEFAULT_PORTS = {'t': '51811', 's': '51812'} + PEERS = [ + 'electrum.faircoin.world s', + 'electrumfair.punto0.org s', + ] + + @classmethod + def block(cls, raw_block, height): + '''Return a Block namedtuple given a raw block and its height.''' + if height > 0: + return super().block(raw_block, height) + else: + return Block(raw_block, cls.block_header(raw_block, height), []) + + @classmethod + def electrum_header(cls, header, height): + h = super().electrum_header(header, height) + h['payload_hash'] = hash_to_hex_str(h['payload_hash']) + return h + + +class Zcash(EquihashMixin, Coin): + NAME = "Zcash" + SHORTNAME = "ZEC" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("1CB8") + P2SH_VERBYTES = [bytes.fromhex("1CBD")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('00040fe8ec8471911baa1db1266ea15d' + 'd06b4a8a5c453883c000b031973dce08') + DESERIALIZER = lib_tx.DeserializerZcash + TX_COUNT = 329196 + TX_COUNT_HEIGHT = 68379 + TX_PER_BLOCK = 5 + RPC_PORT = 8232 + REORG_LIMIT = 800 + + +class ZcashTestnet(Zcash): + SHORTNAME = "TAZ" + NET = "testnet" + P2PKH_VERBYTE = bytes.fromhex("1D25") + P2SH_VERBYTES = [bytes.fromhex("1CBA")] + WIF_BYTE = bytes.fromhex("EF") + GENESIS_HASH = ('05a60a92d99d85997cce3b87616c089f' + '6124d7342af37106edc76126334a2c38') + TX_COUNT = 242312 + TX_COUNT_HEIGHT = 321685 + TX_PER_BLOCK = 2 + RPC_PORT = 18232 + + +class SnowGem(EquihashMixin, Coin): + NAME = "SnowGem" + SHORTNAME = "SNG" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("1C28") + P2SH_VERBYTES = [bytes.fromhex("1C2D")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('00068b35729d9d2b0c294ff1fe9af009' + '4740524311a131de40e7f705e4c29a5b') + DESERIALIZER = lib_tx.DeserializerZcash + TX_COUNT = 140698 + TX_COUNT_HEIGHT = 102802 + TX_PER_BLOCK = 2 + RPC_PORT = 16112 + REORG_LIMIT = 800 + CHUNK_SIZE = 200 + + @classmethod + def electrum_header(cls, header, height): + h = super().electrum_header(header, height) + h['n_solution'] = base64.b64encode(lib_tx.Deserializer( + header, start=140)._read_varbytes()).decode('utf8') + return h + + +class BitcoinZ(EquihashMixin, Coin): + NAME = "BitcoinZ" + SHORTNAME = "BTCZ" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("1CB8") + P2SH_VERBYTES = [bytes.fromhex("1CBD")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('f499ee3d498b4298ac6a64205b8addb7' + 'c43197e2a660229be65db8a4534d75c1') + DESERIALIZER = lib_tx.DeserializerZcash + TX_COUNT = 171976 + TX_COUNT_HEIGHT = 81323 + TX_PER_BLOCK = 3 + RPC_PORT = 1979 + REORG_LIMIT = 800 + + +class Hush(EquihashMixin, Coin): + NAME = "Hush" + SHORTNAME = "HUSH" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("1CB8") + P2SH_VERBYTES = [bytes.fromhex("1CBD")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('0003a67bc26fe564b75daf11186d3606' + 
'52eb435a35ba3d9d3e7e5d5f8e62dc17') + DESERIALIZER = lib_tx.DeserializerZcash + TX_COUNT = 329196 + TX_COUNT_HEIGHT = 68379 + TX_PER_BLOCK = 5 + RPC_PORT = 8822 + REORG_LIMIT = 800 + + +class Zclassic(EquihashMixin, Coin): + NAME = "Zclassic" + SHORTNAME = "ZCL" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("1CB8") + P2SH_VERBYTES = [bytes.fromhex("1CBD")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('0007104ccda289427919efc39dc9e4d4' + '99804b7bebc22df55f8b834301260602') + DESERIALIZER = lib_tx.DeserializerZcash + TX_COUNT = 329196 + TX_COUNT_HEIGHT = 68379 + TX_PER_BLOCK = 5 + RPC_PORT = 8023 + REORG_LIMIT = 800 + + +class Koto(Coin): + NAME = "Koto" + SHORTNAME = "KOTO" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("1836") + P2SH_VERBYTES = [bytes.fromhex("183B")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('6d424c350729ae633275d51dc3496e16' + 'cd1b1d195c164da00f39c499a2e9959e') + DESERIALIZER = lib_tx.DeserializerZcash + TX_COUNT = 158914 + TX_COUNT_HEIGHT = 67574 + TX_PER_BLOCK = 3 + RPC_PORT = 8432 + REORG_LIMIT = 800 + PEERS = [ + 'fr.kotocoin.info s t', + 'electrum.kotocoin.info s t', + ] + + +class KotoTestnet(Koto): + SHORTNAME = "TOKO" + NET = "testnet" + P2PKH_VERBYTE = bytes.fromhex("18A4") + P2SH_VERBYTES = [bytes.fromhex("1839")] + WIF_BYTE = bytes.fromhex("EF") + GENESIS_HASH = ('bf84afbde20c2d213b68b231ddb585ab' + '616ef7567226820f00d9b397d774d2f0') + TX_COUNT = 91144 + TX_COUNT_HEIGHT = 89662 + TX_PER_BLOCK = 1 + RPC_PORT = 18432 + PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'} + PEERS = [ + 'testnet.kotocoin.info s t', + ] + + +class Komodo(KomodoMixin, EquihashMixin, Coin): + NAME = "Komodo" + SHORTNAME = "KMD" + NET = "mainnet" + TX_COUNT = 693629 + TX_COUNT_HEIGHT = 491777 + TX_PER_BLOCK = 2 + RPC_PORT = 7771 + REORG_LIMIT = 800 + PEERS: List[str] = [] + + +class Monaize(KomodoMixin, EquihashMixin, Coin): + NAME = "Monaize" + SHORTNAME = "MNZ" + NET = "mainnet" + TX_COUNT = 256 + TX_COUNT_HEIGHT = 128 + TX_PER_BLOCK = 2 + RPC_PORT = 14337 + REORG_LIMIT = 800 + PEERS: List[str] = [] + + +class Einsteinium(Coin): + NAME = "Einsteinium" + SHORTNAME = "EMC2" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("21") + P2SH_VERBYTES = [bytes.fromhex("05")] + WIF_BYTE = bytes.fromhex("b0") + GENESIS_HASH = ('4e56204bb7b8ac06f860ff1c845f03f9' + '84303b5b97eb7b42868f714611aed94b') + DESERIALIZER = lib_tx.DeserializerSegWit + TX_COUNT = 2087559 + TX_COUNT_HEIGHT = 1358517 + TX_PER_BLOCK = 2 + RPC_PORT = 41879 + REORG_LIMIT = 2000 + + +class Blackcoin(ScryptMixin, Coin): + NAME = "Blackcoin" + SHORTNAME = "BLK" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("19") + P2SH_VERBYTES = [bytes.fromhex("55")] + WIF_BYTE = bytes.fromhex("99") + GENESIS_HASH = ('000001faef25dec4fbcf906e6242621d' + 'f2c183bf232f263d0ba5b101911e4563') + DAEMON = daemon.LegacyRPCDaemon + TX_COUNT = 4594999 + TX_COUNT_HEIGHT = 1667070 + TX_PER_BLOCK = 3 + RPC_PORT = 15715 + REORG_LIMIT = 5000 + + +class Bitbay(ScryptMixin, Coin): + NAME = "Bitbay" + SHORTNAME = "BAY" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("19") + P2SH_VERBYTES = [bytes.fromhex("55")] + WIF_BYTE = bytes.fromhex("99") + GENESIS_HASH = ('0000075685d3be1f253ce777174b1594' + '354e79954d2a32a6f77fe9cba00e6467') + TX_COUNT = 4594999 + TX_COUNT_HEIGHT = 1667070 + TX_PER_BLOCK = 3 + RPC_PORT = 19914 + REORG_LIMIT = 5000 + + +class Peercoin(Coin): + NAME = "Peercoin" + SHORTNAME = "PPC" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("37") + P2SH_VERBYTES = [bytes.fromhex("75")] + WIF_BYTE = bytes.fromhex("b7") + 
GENESIS_HASH = ('0000000032fe677166d54963b62a4677' + 'd8957e87c508eaa4fd7eb1c880cd27e3') + DESERIALIZER = lib_tx.DeserializerTxTime + DAEMON = daemon.LegacyRPCDaemon + TX_COUNT = 1207356 + TX_COUNT_HEIGHT = 306425 + TX_PER_BLOCK = 4 + RPC_PORT = 9902 + REORG_LIMIT = 5000 + + +class Reddcoin(Coin): + NAME = "Reddcoin" + SHORTNAME = "RDD" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("3d") + P2SH_VERBYTES = [bytes.fromhex("05")] + WIF_BYTE = bytes.fromhex("bd") + GENESIS_HASH = ('b868e0d95a3c3c0e0dadc67ee587aaf9' + 'dc8acbf99e3b4b3110fad4eb74c1decc') + DESERIALIZER = lib_tx.DeserializerReddcoin + TX_COUNT = 5413508 + TX_COUNT_HEIGHT = 1717382 + TX_PER_BLOCK = 3 + RPC_PORT = 45443 + + +class TokenPay(ScryptMixin, Coin): + NAME = "TokenPay" + SHORTNAME = "TPAY" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("41") + P2SH_VERBYTES = [bytes.fromhex("7e")] + WIF_BYTE = bytes.fromhex("b3") + GENESIS_HASH = ('000008b71ab32e585a23f0de642dc113' + '740144e94c0ece047751e9781f953ae9') + DESERIALIZER = lib_tx.DeserializerTokenPay + DAEMON = daemon.LegacyRPCDaemon + TX_COUNT = 147934 + TX_COUNT_HEIGHT = 73967 + TX_PER_BLOCK = 100 + RPC_PORT = 8800 + REORG_LIMIT = 500 + XPUB_VERBYTES = bytes.fromhex("0488B21E") + XPRV_VERBYTES = bytes.fromhex("0488ADE4") + + PEERS = [ + "electrum-us.tpay.ai s", + "electrum-eu.tpay.ai s", + ] + + +class Vertcoin(Coin): + NAME = "Vertcoin" + SHORTNAME = "VTC" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488B21E") + XPRV_VERBYTES = bytes.fromhex("0488ADE4") + P2PKH_VERBYTE = bytes.fromhex("47") + P2SH_VERBYTES = [bytes.fromhex("05")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('4d96a915f49d40b1e5c2844d1ee2dccb' + '90013a990ccea12c492d22110489f0c4') + DESERIALIZER = lib_tx.DeserializerSegWit + TX_COUNT = 2383423 + TX_COUNT_HEIGHT = 759076 + TX_PER_BLOCK = 3 + RPC_PORT = 5888 + REORG_LIMIT = 1000 + + +class Monacoin(Coin): + NAME = "Monacoin" + SHORTNAME = "MONA" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488B21E") + XPRV_VERBYTES = bytes.fromhex("0488ADE4") + P2PKH_VERBYTE = bytes.fromhex("32") + P2SH_VERBYTES = [bytes.fromhex("37"), bytes.fromhex("05")] + WIF_BYTE = bytes.fromhex("B0") + GENESIS_HASH = ('ff9f1c0116d19de7c9963845e129f9ed' + '1bfc0b376eb54fd7afa42e0d418c8bb6') + DESERIALIZER = lib_tx.DeserializerSegWit + TX_COUNT = 2568580 + TX_COUNT_HEIGHT = 1029766 + TX_PER_BLOCK = 2 + RPC_PORT = 9402 + REORG_LIMIT = 1000 + PEERS = [ + 'electrumx.tamami-foundation.org s t', + 'electrumx2.tamami-foundation.org s t', + 'electrumx3.tamami-foundation.org s t', + 'electrumx1.monacoin.nl s t', + 'electrumx2.monacoin.nl s t', + 'electrumx1.monacoin.ninja s t', + 'electrumx2.monacoin.ninja s t', + 'electrumx2.movsign.info s t', + 'electrum-mona.bitbank.cc s t', + 'ri7rzlmdaf4eqbza.onion s t', + ] + + +class MonacoinTestnet(Monacoin): + SHORTNAME = "XMN" + NET = "testnet" + XPUB_VERBYTES = bytes.fromhex("043587CF") + XPRV_VERBYTES = bytes.fromhex("04358394") + P2PKH_VERBYTE = bytes.fromhex("6F") + P2SH_VERBYTES = [bytes.fromhex("75"), bytes.fromhex("C4")] + WIF_BYTE = bytes.fromhex("EF") + GENESIS_HASH = ('a2b106ceba3be0c6d097b2a6a6aacf9d' + '638ba8258ae478158f449c321061e0b2') + TX_COUNT = 83602 + TX_COUNT_HEIGHT = 83252 + TX_PER_BLOCK = 1 + RPC_PORT = 19402 + REORG_LIMIT = 1000 + PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'} + PEERS = [ + 'electrumx1.testnet.monacoin.ninja s t', + 'electrumx1.testnet.monacoin.nl s t', + ] + + +class Crown(AuxPowMixin, Coin): + NAME = "Crown" + SHORTNAME = "CRW" + NET = "mainnet" + XPUB_VERBYTES = 
bytes.fromhex("0488b21e") + XPRV_VERBYTES = bytes.fromhex("0488ade4") + P2PKH_VERBYTE = bytes.fromhex("00") + P2SH_VERBYTES = [bytes.fromhex("1c")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('0000000085370d5e122f64f4ab19c686' + '14ff3df78c8d13cb814fd7e69a1dc6da') + TX_COUNT = 13336629 + TX_COUNT_HEIGHT = 1268206 + TX_PER_BLOCK = 10 + RPC_PORT = 9341 + REORG_LIMIT = 1000 + PEERS = [ + 'sgp-crwseed.crowndns.info s t', + 'blr-crwseed.crowndns.info s t', + 'sfo-crwseed.crowndns.info s t', + 'nyc-crwseed.crowndns.info s t', + 'ams-crwseed.crowndns.info s t', + 'tor-crwseed.crowndns.info s t', + 'lon-crwseed.crowndns.info s t', + 'fra-crwseed.crowndns.info s t', + ] + + +class Fujicoin(Coin): + NAME = "Fujicoin" + SHORTNAME = "FJC" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488b21e") + XPRV_VERBYTES = bytes.fromhex("0488ade4") + P2PKH_VERBYTE = bytes.fromhex("24") + P2SH_VERBYTES = [bytes.fromhex("10")] + WIF_BYTE = bytes.fromhex("a4") + GENESIS_HASH = ('adb6d9cfd74075e7f91608add4bd2a2e' + 'a636f70856183086842667a1597714a0') + DESERIALIZER = lib_tx.DeserializerSegWit + TX_COUNT = 170478 + TX_COUNT_HEIGHT = 1521676 + TX_PER_BLOCK = 1 + RPC_PORT = 3776 + REORG_LIMIT = 1000 + + +class Neblio(ScryptMixin, Coin): + NAME = "Neblio" + SHORTNAME = "NEBL" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488b21e") + XPRV_VERBYTES = bytes.fromhex("0488ade4") + P2PKH_VERBYTE = bytes.fromhex("35") + P2SH_VERBYTES = [bytes.fromhex("70")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('7286972be4dbc1463d256049b7471c25' + '2e6557e222cab9be73181d359cd28bcc') + TX_COUNT = 23675 + TX_COUNT_HEIGHT = 22785 + TX_PER_BLOCK = 1 + RPC_PORT = 6326 + REORG_LIMIT = 1000 + + +class Bitzeny(Coin): + NAME = "Bitzeny" + SHORTNAME = "ZNY" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488b21e") + XPRV_VERBYTES = bytes.fromhex("0488ade4") + P2PKH_VERBYTE = bytes.fromhex("51") + P2SH_VERBYTES = [bytes.fromhex("05")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('000009f7e55e9e3b4781e22bd87a7cfa' + '4acada9e4340d43ca738bf4e9fb8f5ce') + ESTIMATE_FEE = 0.001 + RELAY_FEE = 0.001 + DAEMON = daemon.FakeEstimateFeeDaemon + TX_COUNT = 1408733 + TX_COUNT_HEIGHT = 1015115 + TX_PER_BLOCK = 1 + RPC_PORT = 9252 + REORG_LIMIT = 1000 + + @classmethod + def header_hash(cls, header): + '''Given a header return the hash.''' + import zny_yescrypt + return zny_yescrypt.getPoWHash(header) + + +class CanadaeCoin(AuxPowMixin, Coin): + NAME = "CanadaeCoin" + SHORTNAME = "CDN" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488b21e") + XPRV_VERBYTES = bytes.fromhex("0488ade4") + P2PKH_VERBYTE = bytes.fromhex("1C") + P2SH_VERBYTES = [bytes.fromhex("05")] + WIF_BYTE = bytes.fromhex("9c") + GENESIS_HASH = ('863626dadaef221e2e2f30ff3dacae44' + 'cabdae9e0028058072181b3fb675d94a') + ESTIMATE_FEE = 0.0001 + RELAY_FEE = 0.0001 + DAEMON = daemon.FakeEstimateFeeDaemon + TX_COUNT = 3455905 + TX_COUNT_HEIGHT = 3645419 + TX_PER_BLOCK = 1 + RPC_PORT = 34330 + REORG_LIMIT = 1000 + + +class Denarius(Coin): + NAME = "Denarius" + SHORTNAME = "DNR" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488b21e") + XPRV_VERBYTES = bytes.fromhex("0488ade4") + P2PKH_VERBYTE = bytes.fromhex("1E") # Address starts with a D + P2SH_VERBYTES = [bytes.fromhex("5A")] + WIF_BYTE = bytes.fromhex("9E") # WIF starts with a 6 + GENESIS_HASH = ('00000d5dbbda01621cfc16bbc1f9bf32' + '64d641a5dbf0de89fd0182c2c4828fcd') + DESERIALIZER = lib_tx.DeserializerTxTime + TX_COUNT = 4230 + RPC_PORT = 32339 + ESTIMATE_FEE = 0.00001 + RELAY_FEE = 0.00001 + DAEMON = 
daemon.FakeEstimateFeeDaemon + TX_COUNT_HEIGHT = 306187 + TX_PER_BLOCK = 4000 + + @classmethod + def header_hash(cls, header): + '''Given a header return the hash.''' + import tribus_hash + return tribus_hash.getPoWHash(header) + + +class DenariusTestnet(Denarius): + NET = "testnet" + XPUB_VERBYTES = bytes.fromhex("043587cf") + XPRV_VERBYTES = bytes.fromhex("04358394") + P2PKH_VERBYTE = bytes.fromhex("12") + P2SH_VERBYTES = [bytes.fromhex("74")] + WIF_BYTE = bytes.fromhex("ef") + GENESIS_HASH = ('000086bfe8264d241f7f8e5393f74778' + '4b8ca2aa98bdd066278d590462a4fdb4') + RPC_PORT = 32338 + REORG_LIMIT = 2000 + + +class Sibcoin(Dash): + NAME = "Sibcoin" + SHORTNAME = "SIB" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488b21e") + XPRV_VERBYTES = bytes.fromhex("0488ade4") + P2PKH_VERBYTE = bytes.fromhex("3F") + P2SH_VERBYTES = [bytes.fromhex("28")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('00000c492bf73490420868bc577680bf' + 'c4c60116e7e85343bc624787c21efa4c') + DAEMON = daemon.DashDaemon + TX_COUNT = 1000 + TX_COUNT_HEIGHT = 10000 + TX_PER_BLOCK = 1 + RPC_PORT = 1944 + REORG_LIMIT = 1000 + PEERS: List[str] = [] + + @classmethod + def header_hash(cls, header): + ''' + Given a header return the hash for sibcoin. + Need to download `x11_gost_hash` module + Source code: https://github.com/ivansib/x11_gost_hash + ''' + import x11_gost_hash + return x11_gost_hash.getPoWHash(header) + + +class Chips(Coin): + NAME = "Chips" + SHORTNAME = "CHIPS" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("3c") + P2SH_VERBYTES = [bytes.fromhex("55")] + WIF_BYTE = bytes.fromhex("bc") + GENESIS_HASH = ('0000006e75f6aa0efdbf7db03132aa4e' + '4d0c84951537a6f5a7c39a0a9d30e1e7') + DESERIALIZER = lib_tx.DeserializerSegWit + TX_COUNT = 145290 + TX_COUNT_HEIGHT = 318637 + TX_PER_BLOCK = 2 + RPC_PORT = 57776 + REORG_LIMIT = 800 + + +class Feathercoin(Coin): + NAME = "Feathercoin" + SHORTNAME = "FTC" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488BC26") + XPRV_VERBYTES = bytes.fromhex("0488DAEE") + P2PKH_VERBYTE = bytes.fromhex("0E") + P2SH_VERBYTES = [bytes.fromhex("05")] + WIF_BYTE = bytes.fromhex("8E") + GENESIS_HASH = ('12a765e31ffd4059bada1e25190f6e98' + 'c99d9714d334efa41a195a7e7e04bfe2') + DESERIALIZER = lib_tx.DeserializerSegWit + TX_COUNT = 3170843 + TX_COUNT_HEIGHT = 1981777 + TX_PER_BLOCK = 2 + RPC_PORT = 9337 + REORG_LIMIT = 2000 + PEERS = [ + 'electrumx-ch-1.feathercoin.ch s t', + ] + + +class UFO(Coin): + NAME = "UniformFiscalObject" + SHORTNAME = "UFO" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488B21E") + XPRV_VERBYTES = bytes.fromhex("0488ADE4") + P2PKH_VERBYTE = bytes.fromhex("1B") + P2SH_VERBYTES = [bytes.fromhex("44")] + WIF_BYTE = bytes.fromhex("9B") + GENESIS_HASH = ('ba1d39b4928ab03d813d952daf65fb77' + '97fcf538a9c1b8274f4edc8557722d13') + DESERIALIZER = lib_tx.DeserializerSegWit + TX_COUNT = 1608926 + TX_COUNT_HEIGHT = 1300154 + TX_PER_BLOCK = 2 + RPC_PORT = 9888 + REORG_LIMIT = 2000 + PEERS = [ + 'electrumx1.ufobject.com s t', + ] + + +class Newyorkcoin(AuxPowMixin, Coin): + NAME = "Newyorkcoin" + SHORTNAME = "NYC" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("3c") + P2SH_VERBYTES = [bytes.fromhex("16")] + WIF_BYTE = bytes.fromhex("bc") + GENESIS_HASH = ('5597f25c062a3038c7fd815fe46c67de' + 'dfcb3c839fbc8e01ed4044540d08fe48') + TX_COUNT = 5161944 + TX_COUNT_HEIGHT = 3948743 + TX_PER_BLOCK = 2 + REORG_LIMIT = 2000 + + +class NewyorkcoinTestnet(Newyorkcoin): + SHORTNAME = "tNYC" + NET = "testnet" + P2PKH_VERBYTE = bytes.fromhex("71") + P2SH_VERBYTES = 
[bytes.fromhex("c4")] + WIF_BYTE = bytes.fromhex("f1") + GENESIS_HASH = ('24463e4d3c625b0a9059f309044c2cf0' + 'd7e196cf2a6ecce901f24f681be33c8f') + TX_COUNT = 5161944 + TX_COUNT_HEIGHT = 3948743 + TX_PER_BLOCK = 2 + REORG_LIMIT = 2000 + + +class Bitcore(BitcoinMixin, Coin): + NAME = "Bitcore" + SHORTNAME = "BTX" + P2PKH_VERBYTE = bytes.fromhex("03") + P2SH_VERBYTES = [bytes.fromhex("7D")] + WIF_BYTE = bytes.fromhex("80") + DESERIALIZER = lib_tx.DeserializerSegWit + GENESIS_HASH = ('604148281e5c4b7f2487e5d03cd60d8e' + '6f69411d613f6448034508cea52e9574') + TX_COUNT = 126979 + TX_COUNT_HEIGHT = 126946 + TX_PER_BLOCK = 2 + RPC_PORT = 8556 + + +class GameCredits(Coin): + NAME = "GameCredits" + SHORTNAME = "GAME" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("26") + P2SH_VERBYTES = [bytes.fromhex("05")] + WIF_BYTE = bytes.fromhex("a6") + GENESIS_HASH = ('91ec5f25ee9a0ffa1af7d4da4db9a552' + '228dd2dc77cdb15b738be4e1f55f30ee') + DESERIALIZER = lib_tx.DeserializerSegWit + TX_COUNT = 316796 + TX_COUNT_HEIGHT = 2040250 + TX_PER_BLOCK = 2 + RPC_PORT = 40001 + REORG_LIMIT = 1000 + + +class Machinecoin(Coin): + NAME = "Machinecoin" + SHORTNAME = "MAC" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488b21e") + XPRV_VERBYTES = bytes.fromhex("0488ade4") + P2PKH_VERBYTE = bytes.fromhex("32") + P2SH_VERBYTES = [bytes.fromhex("26"), bytes.fromhex("05")] + WIF_BYTE = bytes.fromhex("b2") + GENESIS_HASH = ('6a1f879bcea5471cbfdee1fd0cb2ddcc' + '4fed569a500e352d41de967703e83172') + DESERIALIZER = lib_tx.DeserializerSegWit + TX_COUNT = 137641 + TX_COUNT_HEIGHT = 513020 + TX_PER_BLOCK = 2 + RPC_PORT = 40332 + REORG_LIMIT = 800 + + +class BitcoinAtom(Coin): + NAME = "BitcoinAtom" + SHORTNAME = "BCA" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("17") + P2SH_VERBYTES = [bytes.fromhex("0a")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('000000000019d6689c085ae165831e93' + '4ff763ae46a2a6c172b3f1b60a8ce26f') + STATIC_BLOCK_HEADERS = False + DESERIALIZER = lib_tx.DeserializerBitcoinAtom + HEADER_SIZE_POST_FORK = 84 + BLOCK_PROOF_OF_STAKE = 0x01 + BLOCK_PROOF_OF_STAKE_FLAGS = b'\x01\x00\x00\x00' + TX_COUNT = 295158744 + TX_COUNT_HEIGHT = 589197 + TX_PER_BLOCK = 10 + RPC_PORT = 9136 + REORG_LIMIT = 5000 + + @classmethod + def header_hash(cls, header): + '''Given a header return hash''' + header_to_be_hashed = header[:cls.BASIC_HEADER_SIZE] + # New block header format has some extra flags in the end + if len(header) == cls.HEADER_SIZE_POST_FORK: + flags, = util.unpack_le_uint32_from(header, len(header) - 4) + # Proof of work blocks have special serialization + if flags & cls.BLOCK_PROOF_OF_STAKE != 0: + header_to_be_hashed += cls.BLOCK_PROOF_OF_STAKE_FLAGS + + return double_sha256(header_to_be_hashed) + + @classmethod + def block_header(cls, block, height): + '''Return the block header bytes''' + deserializer = cls.DESERIALIZER(block) + return deserializer.read_header(height, cls.BASIC_HEADER_SIZE) + + +class Decred(Coin): + NAME = "Decred" + SHORTNAME = "DCR" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("02fda926") + XPRV_VERBYTES = bytes.fromhex("02fda4e8") + P2PKH_VERBYTE = bytes.fromhex("073f") + P2SH_VERBYTES = [bytes.fromhex("071a")] + WIF_BYTE = bytes.fromhex("22de") + GENESIS_HASH = ('298e5cc3d985bfe7f81dc135f360abe0' + '89edd4396b86d2de66b0cef42b21d980') + BASIC_HEADER_SIZE = 180 + HEADER_HASH = lib_tx.DeserializerDecred.blake256 + DESERIALIZER = lib_tx.DeserializerDecred + DAEMON = daemon.DecredDaemon + BLOCK_PROCESSOR = block_proc.DecredBlockProcessor + ENCODE_CHECK = 
partial(Base58.encode_check, + hash_fn=lib_tx.DeserializerDecred.blake256d) + DECODE_CHECK = partial(Base58.decode_check, + hash_fn=lib_tx.DeserializerDecred.blake256d) + HEADER_VALUES = ['version', 'prev_block_hash', 'merkle_root', 'stake_root', + 'vote_bits', 'final_state', 'voters', 'fresh_stake', + 'revocations', 'pool_size', 'bits', 'sbits', + 'block_height', 'size', 'timestamp', 'nonce', + 'extra_data', 'stake_version'] + HEADER_UNPACK = struct.Struct( + '< i 32s 32s 32s H 6s H B B I I Q I I I I 32s I').unpack_from + TX_COUNT = 4629388 + TX_COUNT_HEIGHT = 260628 + TX_PER_BLOCK = 17 + REORG_LIMIT = 1000 + RPC_PORT = 9109 + + @classmethod + def header_hash(cls, header): + '''Given a header return the hash.''' + return cls.HEADER_HASH(header) + + @classmethod + def block(cls, raw_block, height): + '''Return a Block namedtuple given a raw block and its height.''' + if height > 0: + return super().block(raw_block, height) + else: + return Block(raw_block, cls.block_header(raw_block, height), []) + + @classmethod + def electrum_header(cls, header, height): + h = super().electrum_header(header, height) + h['stake_root'] = hash_to_hex_str(h['stake_root']) + h['final_state'] = hash_to_hex_str(h['final_state']) + h['extra_data'] = hash_to_hex_str(h['extra_data']) + return h + + +class DecredTestnet(Decred): + SHORTNAME = "tDCR" + NET = "testnet" + XPUB_VERBYTES = bytes.fromhex("043587d1") + XPRV_VERBYTES = bytes.fromhex("04358397") + P2PKH_VERBYTE = bytes.fromhex("0f21") + P2SH_VERBYTES = [bytes.fromhex("0efc")] + WIF_BYTE = bytes.fromhex("230e") + GENESIS_HASH = ( + 'a649dce53918caf422e9c711c858837e08d626ecfcd198969b24f7b634a49bac') + BASIC_HEADER_SIZE = 180 + ALLOW_ADVANCING_ERRORS = True + TX_COUNT = 217380620 + TX_COUNT_HEIGHT = 464000 + TX_PER_BLOCK = 1800 + REORG_LIMIT = 1000 + RPC_PORT = 19109 + + +class Axe(Dash): + NAME = "Axe" + SHORTNAME = "AXE" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("02fe52cc") + XPRV_VERBYTES = bytes.fromhex("02fe52f8") + P2PKH_VERBYTE = bytes.fromhex("37") + P2SH_VERBYTES = [bytes.fromhex("10")] + WIF_BYTE = bytes.fromhex("cc") + GENESIS_HASH = ('00000c33631ca6f2f61368991ce2dc03' + '306b5bb50bf7cede5cfbba6db38e52e6') + DAEMON = daemon.DashDaemon + TX_COUNT = 18405 + TX_COUNT_HEIGHT = 30237 + TX_PER_BLOCK = 1 + RPC_PORT = 9337 + REORG_LIMIT = 1000 + PEERS: List[str] = [] + + @classmethod + def header_hash(cls, header): + ''' + Given a header return the hash for AXE. + Need to download `axe_hash` module + Source code: https://github.com/AXErunners/axe_hash + ''' + import x11_hash + return x11_hash.getPoWHash(header) + + +class Xuez(Coin): + NAME = "Xuez" + SHORTNAME = "XUEZ" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("022d2533") + XPRV_VERBYTES = bytes.fromhex("0221312b") + P2PKH_VERBYTE = bytes.fromhex("48") + P2SH_VERBYTES = [bytes.fromhex("12")] + WIF_BYTE = bytes.fromhex("d4") + GENESIS_HASH = ('000000e1febc39965b055e8e0117179a' + '4d18e24e7aaa0c69864c4054b4f29445') + TX_COUNT = 30000 + TX_COUNT_HEIGHT = 15000 + TX_PER_BLOCK = 1 + RPC_PORT = 41799 + REORG_LIMIT = 1000 + BASIC_HEADER_SIZE = 112 + PEERS: List[str] = [] + + @classmethod + def header_hash(cls, header): + ''' + Given a header return the hash for Xuez. 
+ Need to download `xevan_hash` module + Source code: https://github.com/xuez/xuez + ''' + version, = util.unpack_le_uint32_from(header) + + import xevan_hash + + if version == 1: + return xevan_hash.getPoWHash(header[:80]) + else: + return xevan_hash.getPoWHash(header) + + @classmethod + def electrum_header(cls, header, height): + h = super().electrum_header(header, height) + if h['version'] > 1: + h['nAccumulatorCheckpoint'] = hash_to_hex_str(header[80:]) + return h + + +class Pac(Coin): + NAME = "PAC" + SHORTNAME = "PAC" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488B21E") + XPRV_VERBYTES = bytes.fromhex("0488ADE4") + GENESIS_HASH = ('00000354655ff039a51273fe61d3b493' + 'bd2897fe6c16f732dbc4ae19f04b789e') + P2PKH_VERBYTE = bytes.fromhex("37") + P2SH_VERBYTES = [bytes.fromhex("0A")] + WIF_BYTE = bytes.fromhex("CC") + TX_COUNT_HEIGHT = 14939 + TX_COUNT = 23708 + TX_PER_BLOCK = 2 + RPC_PORT = 7111 + PEERS = [ + 'electrum.paccoin.io s t', + 'electro-pac.paccoin.io s t' + ] + SESSIONCLS = DashElectrumX + DAEMON = daemon.DashDaemon + ESTIMATE_FEE = 0.00001 + RELAY_FEE = 0.00001 + + @classmethod + def header_hash(cls, header): + '''Given a header return the hash.''' + import x11_hash + return x11_hash.getPoWHash(header) + + +class PacTestnet(Pac): + SHORTNAME = "tPAC" + NET = "testnet" + XPUB_VERBYTES = bytes.fromhex("043587CF") + XPRV_VERBYTES = bytes.fromhex("04358394") + GENESIS_HASH = ('00000da63bd9478b655ef6bf1bf76cd9' + 'af05202ab68643f9091e049b2b5280ed') + P2PKH_VERBYTE = bytes.fromhex("78") + P2SH_VERBYTES = [bytes.fromhex("0E")] + WIF_BYTE = bytes.fromhex("EF") + TX_COUNT_HEIGHT = 16275 + TX_COUNT = 16275 + TX_PER_BLOCK = 1 + RPC_PORT = 17111 + + +class Polis(Coin): + NAME = "Polis" + SHORTNAME = "POLIS" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("03E25D7E") + XPRV_VERBYTES = bytes.fromhex("03E25945") + GENESIS_HASH = ('000009701eb781a8113b1af1d814e2f0' + '60f6408a2c990db291bc5108a1345c1e') + P2PKH_VERBYTE = bytes.fromhex("37") + P2SH_VERBYTES = [bytes.fromhex("38")] + WIF_BYTE = bytes.fromhex("3c") + TX_COUNT_HEIGHT = 111111 + TX_COUNT = 256128 + TX_PER_BLOCK = 4 + RPC_PORT = 24127 + PEERS = [ + 'electrum1-polis.polispay.org', + 'electrum2-polis.polispay.org' + ] + SESSIONCLS = DashElectrumX + DAEMON = daemon.DashDaemon + + @classmethod + def header_hash(cls, header): + '''Given a header return the hash.''' + import x11_hash + return x11_hash.getPoWHash(header) + + +class ColossusXT(Coin): + NAME = "ColossusXT" + SHORTNAME = "COLX" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("022D2533") + XPRV_VERBYTES = bytes.fromhex("0221312B") + GENESIS_HASH = ('a0ce8206c908357008c1b9a8ba2813af' + 'f0989ca7f72d62b14e652c55f02b4f5c') + P2PKH_VERBYTE = bytes.fromhex("1E") + P2SH_VERBYTES = [bytes.fromhex("0D")] + WIF_BYTE = bytes.fromhex("D4") + TX_COUNT_HEIGHT = 356500 + TX_COUNT = 761041 + TX_PER_BLOCK = 4 + RPC_PORT = 51473 + PEERS = [ + 'electrum1-colx.polispay.org', + 'electrum2-colx.polispay.org' + ] + SESSIONCLS = DashElectrumX + DAEMON = daemon.DashDaemon + + @classmethod + def header_hash(cls, header): + '''Given a header return the hash.''' + import quark_hash + return quark_hash.getPoWHash(header) + + +class GoByte(Coin): + NAME = "GoByte" + SHORTNAME = "GBX" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488B21E") + XPRV_VERBYTES = bytes.fromhex("0488ADE4") + GENESIS_HASH = ('0000033b01055cf8df90b01a14734cae' + '92f7039b9b0e48887b4e33a469d7bc07') + P2PKH_VERBYTE = bytes.fromhex("26") + P2SH_VERBYTES = [bytes.fromhex("0A")] + WIF_BYTE = bytes.fromhex("C6") + 
TX_COUNT_HEIGHT = 115890 + TX_COUNT = 245030 + TX_PER_BLOCK = 4 + RPC_PORT = 12454 + PEERS = [ + 'electrum1-gbx.polispay.org', + 'electrum2-gbx.polispay.org' + ] + SESSIONCLS = DashElectrumX + DAEMON = daemon.DashDaemon + + @classmethod + def header_hash(cls, header): + '''Given a header return the hash.''' + import neoscrypt + return neoscrypt.getPoWHash(header) + + +class Monoeci(Coin): + NAME = "Monoeci" + SHORTNAME = "XMCC" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488B21E") + XPRV_VERBYTES = bytes.fromhex("0488ADE4") + GENESIS_HASH = ('0000005be1eb05b05fb45ae38ee9c144' + '1514a65343cd146100a574de4278f1a3') + P2PKH_VERBYTE = bytes.fromhex("32") + P2SH_VERBYTES = [bytes.fromhex("49")] + WIF_BYTE = bytes.fromhex("4D") + TX_COUNT_HEIGHT = 140000 + TX_COUNT = 140000 + TX_PER_BLOCK = 4 + RPC_PORT = 24156 + PEERS = [ + 'electrum1-gbx.polispay.org', + 'electrum2-gbx.polispay.org' + ] + SESSIONCLS = DashElectrumX + DAEMON = daemon.DashDaemon + + @classmethod + def header_hash(cls, header): + '''Given a header return the hash.''' + import x11_hash + return x11_hash.getPoWHash(header) + + +class Minexcoin(EquihashMixin, Coin): + NAME = "Minexcoin" + SHORTNAME = "MNX" + NET = "mainnet" + P2PKH_VERBYTE = bytes.fromhex("4b") + P2SH_VERBYTES = [bytes.fromhex("05")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('490a36d9451a55ed197e34aca7414b35' + 'd775baa4a8e896f1c577f65ce2d214cb') + STATIC_BLOCK_HEADERS = True + BASIC_HEADER_SIZE = 209 + HEADER_SIZE_NO_SOLUTION = 140 + TX_COUNT = 327963 + TX_COUNT_HEIGHT = 74495 + TX_PER_BLOCK = 5 + RPC_PORT = 8022 + CHUNK_SIZE = 960 + PEERS = [ + 'elex01-ams.turinex.eu s t', + 'eu.minexpool.nl s t' + ] + + @classmethod + def electrum_header(cls, header, height): + h = super().electrum_header(header, height) + h['solution'] = hash_to_hex_str(header[cls.HEADER_SIZE_NO_SOLUTION:]) + return h + + @classmethod + def block_header(cls, block, height): + '''Return the block header bytes''' + deserializer = cls.DESERIALIZER(block) + return deserializer.read_header(height, cls.HEADER_SIZE_NO_SOLUTION) + + +class Groestlcoin(Coin): + NAME = "Groestlcoin" + SHORTNAME = "GRS" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488b21e") + XPRV_VERBYTES = bytes.fromhex("0488ade4") + P2PKH_VERBYTE = bytes.fromhex("24") + P2SH_VERBYTES = [bytes.fromhex("05")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('00000ac5927c594d49cc0bdb81759d0d' + 'a8297eb614683d3acb62f0703b639023') + DESERIALIZER = lib_tx.DeserializerGroestlcoin + TX_COUNT = 115900 + TX_COUNT_HEIGHT = 1601528 + TX_PER_BLOCK = 5 + RPC_PORT = 1441 + PEERS = [ + 'electrum1.groestlcoin.org s t', + 'electrum2.groestlcoin.org s t', + '6brsrbiinpc32tfc.onion t', + 'xkj42efxrcy6vbfw.onion t', + ] + + def grshash(data): + import groestlcoin_hash + return groestlcoin_hash.getHash(data, len(data)) + + @classmethod + def header_hash(cls, header): + '''Given a header return the hash.''' + return cls.grshash(header) + + ENCODE_CHECK = partial(Base58.encode_check, hash_fn=grshash) + DECODE_CHECK = partial(Base58.decode_check, hash_fn=grshash) + + +class GroestlcoinTestnet(Groestlcoin): + SHORTNAME = "TGRS" + NET = "testnet" + XPUB_VERBYTES = bytes.fromhex("043587cf") + XPRV_VERBYTES = bytes.fromhex("04358394") + P2PKH_VERBYTE = bytes.fromhex("6f") + P2SH_VERBYTES = [bytes.fromhex("c4")] + WIF_BYTE = bytes.fromhex("ef") + GENESIS_HASH = ('000000ffbb50fc9898cdd36ec163e6ba' + '23230164c0052a28876255b7dcf2cd36') + RPC_PORT = 17766 + PEERS = [ + 'electrum-test1.groestlcoin.org s t', + 'electrum-test2.groestlcoin.org s 
t', + '7frvhgofuf522b5i.onion t', + 'aocojvqcybdoxekv.onion t', + ] + + +class Pivx(Coin): + NAME = "Pivx" + SHORTNAME = "PIVX" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("022D2533") + XPRV_VERBYTES = bytes.fromhex("0221312B") + P2PKH_VERBYTE = bytes.fromhex("1e") + P2SH_VERBYTES = [bytes.fromhex("0d")] + WIF_BYTE = bytes.fromhex("d4") + GENESIS_HASH = ('0000041e482b9b9691d98eefb4847340' + '5c0b8ec31b76df3797c74a78680ef818') + BASIC_HEADER_SIZE = 80 + HDR_V4_SIZE = 112 + HDR_V4_HEIGHT = 863787 + HDR_V4_START_OFFSET = HDR_V4_HEIGHT * BASIC_HEADER_SIZE + TX_COUNT = 2930206 + TX_COUNT_HEIGHT = 1299212 + TX_PER_BLOCK = 2 + RPC_PORT = 51473 + + @classmethod + def static_header_offset(cls, height): + assert cls.STATIC_BLOCK_HEADERS + if height >= cls.HDR_V4_HEIGHT: + relative_v4_offset = (height - cls.HDR_V4_HEIGHT) * cls.HDR_V4_SIZE + return cls.HDR_V4_START_OFFSET + relative_v4_offset + else: + return height * cls.BASIC_HEADER_SIZE + + @classmethod + def header_hash(cls, header): + version, = util.unpack_le_uint32_from(header) + if version >= 4: + return super().header_hash(header) + else: + import quark_hash + return quark_hash.getPoWHash(header) + + +class PivxTestnet(Pivx): + SHORTNAME = "tPIVX" + NET = "testnet" + XPUB_VERBYTES = bytes.fromhex("3a8061a0") + XPRV_VERBYTES = bytes.fromhex("3a805837") + P2PKH_VERBYTE = bytes.fromhex("8B") + P2SH_VERBYTES = [bytes.fromhex("13")] + WIF_BYTE = bytes.fromhex("EF") + GENESIS_HASH = ( + '0000041e482b9b9691d98eefb48473405c0b8ec31b76df3797c74a78680ef818') + BASIC_HEADER_SIZE = 80 + HDR_V4_SIZE = 112 + HDR_V4_HEIGHT = 863787 + HDR_V4_START_OFFSET = HDR_V4_HEIGHT * BASIC_HEADER_SIZE + TX_COUNT = 2157510 + TX_COUNT_HEIGHT = 569399 + TX_PER_BLOCK = 2 + RPC_PORT = 51472 + + +class Bitg(Coin): + + NAME = "BitcoinGreen" + SHORTNAME = "BITG" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488b21e") + XPRV_VERBYTES = bytes.fromhex("0488ade4") + P2PKH_VERBYTE = bytes.fromhex("26") + P2SH_VERBYTES = [bytes.fromhex("06")] + WIF_BYTE = bytes.fromhex("2e") + GENESIS_HASH = ( + '000008467c3a9c587533dea06ad9380cded3ed32f9742a6c0c1aebc21bf2bc9b') + DAEMON = daemon.DashDaemon + TX_COUNT = 1000 + TX_COUNT_HEIGHT = 10000 + TX_PER_BLOCK = 1 + RPC_PORT = 9332 + REORG_LIMIT = 1000 + SESSIONCLS = DashElectrumX + DAEMON = daemon.DashDaemon + + @classmethod + def header_hash(cls, header): + '''Given a header return the hash.''' + import quark_hash + return quark_hash.getPoWHash(header) + + +class tBitg(Bitg): + SHORTNAME = "tBITG" + NET = "testnet" + XPUB_VERBYTES = bytes.fromhex("043587cf") + XPRV_VERBYTES = bytes.fromhex("04358394") + P2PKH_VERBYTE = bytes.fromhex("62") + P2SH_VERBYTES = [bytes.fromhex("0c")] + WIF_BYTE = bytes.fromhex("6c") + GENESIS_HASH = ( + '000008467c3a9c587533dea06ad9380cded3ed32f9742a6c0c1aebc21bf2bc9b') + RPC_PORT = 19332 + + +class CivX(Coin): + NAME = "CivX" + SHORTNAME = "CIVX" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488b21e") + XPRV_VERBYTES = bytes.fromhex("0488ade4") + GENESIS_HASH = ('00000036090a68c523471da7a4f0f958' + 'c1b4403fef74a003be7f71877699cab7') + P2PKH_VERBYTE = bytes.fromhex("1C") + P2SH_VERBYTE = [bytes.fromhex("57")] + WIF_BYTE = bytes.fromhex("9C") + RPC_PORT = 4561 + TX_COUNT = 1000 + TX_COUNT_HEIGHT = 10000 + TX_PER_BLOCK = 4 + DAEMON = daemon.PreLegacyRPCDaemon + DESERIALIZER = lib_tx.DeserializerTxTime + + @classmethod + def header_hash(cls, header): + version, = util.unpack_le_uint32_from(header) + + if version > 2: + return double_sha256(header) + else: + return hex_str_to_hash(CivX.GENESIS_HASH) + 
+ +class CivXTestnet(CivX): + SHORTNAME = "tCIVX" + NET = "testnet" + XPUB_VERBYTES = bytes.fromhex("043587cf") + XPRV_VERBYTES = bytes.fromhex("04358394") + GENESIS_HASH = ('0000059bb2c2048493efcb0f1a034972' + 'b3ce4089d54c93b69aaab212fb369887') + P2PKH_VERBYTE = bytes.fromhex("4B") + P2SH_VERBYTE = [bytes.fromhex("CE")] + WIF_BYTE = bytes.fromhex("CB") + RPC_PORT = 14561 + + @classmethod + def header_hash(cls, header): + version, = util.unpack_le_uint32_from(header) + + if version > 2: + return double_sha256(header) + else: + return hex_str_to_hash(CivXTestnet.GENESIS_HASH) diff --git a/torba/server/daemon.py b/torba/server/daemon.py new file mode 100644 index 000000000..979d00154 --- /dev/null +++ b/torba/server/daemon.py @@ -0,0 +1,459 @@ +# Copyright (c) 2016-2017, Neil Booth +# +# All rights reserved. +# +# See the file "LICENCE" for information about the copyright +# and warranty status of this software. + +'''Class for handling asynchronous connections to a blockchain +daemon.''' + +import asyncio +import itertools +import json +import time +from calendar import timegm +from struct import pack +from time import strptime + +import aiohttp + +from torba.server.util import hex_to_bytes, class_logger,\ + unpack_le_uint16_from, pack_varint +from torba.server.hash import hex_str_to_hash, hash_to_hex_str +from torba.server.tx import DeserializerDecred +from aiorpcx import JSONRPC + + +class DaemonError(Exception): + '''Raised when the daemon returns an error in its results.''' + + +class WarmingUpError(Exception): + '''Internal - when the daemon is warming up.''' + + +class WorkQueueFullError(Exception): + '''Internal - when the daemon's work queue is full.''' + + +class Daemon(object): + '''Handles connections to a daemon at the given URL.''' + + WARMING_UP = -28 + id_counter = itertools.count() + + def __init__(self, coin, url, max_workqueue=10, init_retry=0.25, + max_retry=4.0): + self.coin = coin + self.logger = class_logger(__name__, self.__class__.__name__) + self.set_url(url) + # Limit concurrent RPC calls to this number. + # See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16 + self.workqueue_semaphore = asyncio.Semaphore(value=max_workqueue) + self.init_retry = init_retry + self.max_retry = max_retry + self._height = None + self.available_rpcs = {} + + def set_url(self, url): + '''Set the URLS to the given list, and switch to the first one.''' + urls = url.split(',') + urls = [self.coin.sanitize_url(url) for url in urls] + for n, url in enumerate(urls): + status = '' if n else ' (current)' + logged_url = self.logged_url(url) + self.logger.info(f'daemon #{n + 1} at {logged_url}{status}') + self.url_index = 0 + self.urls = urls + + def current_url(self): + '''Returns the current daemon URL.''' + return self.urls[self.url_index] + + def logged_url(self, url=None): + '''The host and port part, for logging.''' + url = url or self.current_url() + return url[url.rindex('@') + 1:] + + def failover(self): + '''Call to fail-over to the next daemon URL. + + Returns False if there is only one, otherwise True. 
+ ''' + if len(self.urls) > 1: + self.url_index = (self.url_index + 1) % len(self.urls) + self.logger.info(f'failing over to {self.logged_url()}') + return True + return False + + def client_session(self): + '''An aiohttp client session.''' + return aiohttp.ClientSession() + + async def _send_data(self, data): + async with self.workqueue_semaphore: + async with self.client_session() as session: + async with session.post(self.current_url(), data=data) as resp: + kind = resp.headers.get('Content-Type', None) + if kind == 'application/json': + return await resp.json() + # bitcoind's HTTP protocol "handling" is a bad joke + text = await resp.text() + if 'Work queue depth exceeded' in text: + raise WorkQueueFullError + text = text.strip() or resp.reason + self.logger.error(text) + raise DaemonError(text) + + async def _send(self, payload, processor): + '''Send a payload to be converted to JSON. + + Handles temporary connection issues. Daemon reponse errors + are raise through DaemonError. + ''' + def log_error(error): + nonlocal last_error_log, retry + now = time.time() + if now - last_error_log > 60: + last_error_log = now + self.logger.error(f'{error} Retrying occasionally...') + if retry == self.max_retry and self.failover(): + retry = 0 + + on_good_message = None + last_error_log = 0 + data = json.dumps(payload) + retry = self.init_retry + while True: + try: + result = await self._send_data(data) + result = processor(result) + if on_good_message: + self.logger.info(on_good_message) + return result + except asyncio.TimeoutError: + log_error('timeout error.') + except aiohttp.ServerDisconnectedError: + log_error('disconnected.') + on_good_message = 'connection restored' + except aiohttp.ClientConnectionError: + log_error('connection problem - is your daemon running?') + on_good_message = 'connection restored' + except aiohttp.ClientError as e: + log_error(f'daemon error: {e}') + on_good_message = 'running normally' + except WarmingUpError: + log_error('starting up checking blocks.') + on_good_message = 'running normally' + except WorkQueueFullError: + log_error('work queue full.') + on_good_message = 'running normally' + + await asyncio.sleep(retry) + retry = max(min(self.max_retry, retry * 2), self.init_retry) + + async def _send_single(self, method, params=None): + '''Send a single request to the daemon.''' + def processor(result): + err = result['error'] + if not err: + return result['result'] + if err.get('code') == self.WARMING_UP: + raise WarmingUpError + raise DaemonError(err) + + payload = {'method': method, 'id': next(self.id_counter)} + if params: + payload['params'] = params + return await self._send(payload, processor) + + async def _send_vector(self, method, params_iterable, replace_errs=False): + '''Send several requests of the same method. + + The result will be an array of the same length as params_iterable. + If replace_errs is true, any item with an error is returned as None, + otherwise an exception is raised.''' + def processor(result): + errs = [item['error'] for item in result if item['error']] + if any(err.get('code') == self.WARMING_UP for err in errs): + raise WarmingUpError + if not errs or replace_errs: + return [item['result'] for item in result] + raise DaemonError(errs) + + payload = [{'method': method, 'params': p, 'id': next(self.id_counter)} + for p in params_iterable] + if payload: + return await self._send(payload, processor) + return [] + + async def _is_rpc_available(self, method): + '''Return whether given RPC method is available in the daemon. 
+ + Results are cached and the daemon will generally not be queried with + the same method more than once.''' + available = self.available_rpcs.get(method) + if available is None: + available = True + try: + await self._send_single(method) + except DaemonError as e: + err = e.args[0] + error_code = err.get("code") + available = error_code != JSONRPC.METHOD_NOT_FOUND + self.available_rpcs[method] = available + return available + + async def block_hex_hashes(self, first, count): + '''Return the hex hashes of count block starting at height first.''' + params_iterable = ((h, ) for h in range(first, first + count)) + return await self._send_vector('getblockhash', params_iterable) + + async def deserialised_block(self, hex_hash): + '''Return the deserialised block with the given hex hash.''' + return await self._send_single('getblock', (hex_hash, True)) + + async def raw_blocks(self, hex_hashes): + '''Return the raw binary blocks with the given hex hashes.''' + params_iterable = ((h, False) for h in hex_hashes) + blocks = await self._send_vector('getblock', params_iterable) + # Convert hex string to bytes + return [hex_to_bytes(block) for block in blocks] + + async def mempool_hashes(self): + '''Update our record of the daemon's mempool hashes.''' + return await self._send_single('getrawmempool') + + async def estimatefee(self, block_count): + '''Return the fee estimate for the block count. Units are whole + currency units per KB, e.g. 0.00000995, or -1 if no estimate + is available. + ''' + args = (block_count, ) + if await self._is_rpc_available('estimatesmartfee'): + estimate = await self._send_single('estimatesmartfee', args) + return estimate.get('feerate', -1) + return await self._send_single('estimatefee', args) + + async def getnetworkinfo(self): + '''Return the result of the 'getnetworkinfo' RPC call.''' + return await self._send_single('getnetworkinfo') + + async def relayfee(self): + '''The minimum fee a low-priority tx must pay in order to be accepted + to the daemon's memory pool.''' + network_info = await self.getnetworkinfo() + return network_info['relayfee'] + + async def getrawtransaction(self, hex_hash, verbose=False): + '''Return the serialized raw transaction with the given hash.''' + # Cast to int because some coin daemons are old and require it + return await self._send_single('getrawtransaction', + (hex_hash, int(verbose))) + + async def getrawtransactions(self, hex_hashes, replace_errs=True): + '''Return the serialized raw transactions with the given hashes. + + Replaces errors with None by default.''' + params_iterable = ((hex_hash, 0) for hex_hash in hex_hashes) + txs = await self._send_vector('getrawtransaction', params_iterable, + replace_errs=replace_errs) + # Convert hex strings to bytes + return [hex_to_bytes(tx) if tx else None for tx in txs] + + async def broadcast_transaction(self, raw_tx): + '''Broadcast a transaction to the network.''' + return await self._send_single('sendrawtransaction', (raw_tx, )) + + async def height(self): + '''Query the daemon for its current height.''' + self._height = await self._send_single('getblockcount') + return self._height + + def cached_height(self): + '''Return the cached daemon height. 
+ + If the daemon has not been queried yet this returns None.''' + return self._height + + +class DashDaemon(Daemon): + + async def masternode_broadcast(self, params): + '''Broadcast a masternode announcement to the network.''' + return await self._send_single('masternodebroadcast', params) + + async def masternode_list(self, params): + '''Return the masternode status.''' + return await self._send_single('masternodelist', params) + + +class FakeEstimateFeeDaemon(Daemon): + '''Daemon that simulates estimatefee and relayfee RPC calls. A coin that + wants to use this daemon must define ESTIMATE_FEE & RELAY_FEE''' + + async def estimatefee(self, block_count): + '''Return the fee estimate for the given parameters.''' + return self.coin.ESTIMATE_FEE + + async def relayfee(self): + '''The minimum fee a low-priority tx must pay in order to be accepted + to the daemon's memory pool.''' + return self.coin.RELAY_FEE + + +class LegacyRPCDaemon(Daemon): + '''Handles connections to a daemon at the given URL. + + This class is useful for daemons that don't have the new 'getblock' + RPC call that returns the block in hex; the workaround is to manually + recreate the block bytes. The recreated block bytes may not be exactly + as in the underlying blockchain but they are good enough for our indexing + purposes.''' + + async def raw_blocks(self, hex_hashes): + '''Return the raw binary blocks with the given hex hashes.''' + params_iterable = ((h, ) for h in hex_hashes) + block_info = await self._send_vector('getblock', params_iterable) + + blocks = [] + for i in block_info: + raw_block = await self.make_raw_block(i) + blocks.append(raw_block) + + # Convert hex string to bytes + return blocks + + async def make_raw_header(self, b): + pbh = b.get('previousblockhash') + if pbh is None: + pbh = '0' * 64 + return b''.join([ + pack('<L', b.get('version')), + hex_str_to_hash(pbh), + hex_str_to_hash(b.get('merkleroot')), + pack('<L', self.timestamp_safe(b.get('time'))), + pack('<L', int(b.get('bits'), 16)), + pack('<L', b.get('nonce')) + ]) + + async def make_raw_block(self, b): + '''Construct a raw block''' + header = await self.make_raw_header(b) + + transactions = [] + if b.get('height') > 0: + transactions = await self.getrawtransactions(b.get('tx'), False) + + raw_block = header + num_txs = len(transactions) + if num_txs > 0: + raw_block += pack_varint(num_txs) + raw_block += b''.join(transactions) + else: + raw_block += b'\x00' + + return raw_block + + def timestamp_safe(self, t): + if isinstance(t, int): + return t + return timegm(strptime(t, "%Y-%m-%d %H:%M:%S %Z")) + + +class DecredDaemon(Daemon): + async def raw_blocks(self, hex_hashes): + '''Return the raw binary blocks with the given hex hashes.''' + + params_iterable = ((h, False) for h in hex_hashes) + blocks = await self._send_vector('getblock', params_iterable) + + raw_blocks = [] + valid_tx_tree = {} + for block in blocks: + # Convert to bytes from hex + raw_block = hex_to_bytes(block) + raw_blocks.append(raw_block) + # Check if previous block is valid + prev = self.prev_hex_hash(raw_block) + votebits = unpack_le_uint16_from(raw_block[100:102])[0] + valid_tx_tree[prev] = self.is_valid_tx_tree(votebits) + + processed_raw_blocks = [] + for hash, raw_block in zip(hex_hashes, raw_blocks): + if hash in valid_tx_tree: + is_valid = valid_tx_tree[hash] + else: + # Do something complicated to figure out if this block is valid + header = await self._send_single('getblockheader', (hash, )) + if 'nextblockhash' not in header: + raise DaemonError(f'Could not find next block for {hash}') + next_hash = header['nextblockhash'] + next_header = await self._send_single('getblockheader', + (next_hash, )) + is_valid = self.is_valid_tx_tree(next_header['votebits']) + + if is_valid: + processed_raw_blocks.append(raw_block) + else: + # If this block is invalid remove the normal transactions + self.logger.info(f'block {hash} is invalidated') + 
processed_raw_blocks.append(self.strip_tx_tree(raw_block)) + + return processed_raw_blocks + + @staticmethod + def prev_hex_hash(raw_block): + return hash_to_hex_str(raw_block[4:36]) + + @staticmethod + def is_valid_tx_tree(votebits): + # Check if previous block was invalidated. + return bool(votebits & (1 << 0) != 0) + + def strip_tx_tree(self, raw_block): + c = self.coin + assert issubclass(c.DESERIALIZER, DeserializerDecred) + d = c.DESERIALIZER(raw_block, start=c.BASIC_HEADER_SIZE) + d.read_tx_tree() # Skip normal transactions + # Create a fake block without any normal transactions + return raw_block[:c.BASIC_HEADER_SIZE] + b'\x00' + raw_block[d.cursor:] + + async def height(self): + height = await super().height() + if height > 0: + # Lie about the daemon height as the current tip can be invalidated + height -= 1 + self._height = height + return height + + async def mempool_hashes(self): + mempool = await super().mempool_hashes() + # Add current tip transactions to the 'fake' mempool. + real_height = await self._send_single('getblockcount') + tip_hash = await self._send_single('getblockhash', (real_height,)) + tip = await self.deserialised_block(tip_hash) + # Add normal transactions except coinbase + mempool += tip['tx'][1:] + # Add stake transactions if applicable + mempool += tip.get('stx', []) + return mempool + + def client_session(self): + # FIXME allow self signed certificates + connector = aiohttp.TCPConnector(verify_ssl=False) + return aiohttp.ClientSession(connector=connector) + + +class PreLegacyRPCDaemon(LegacyRPCDaemon): + '''Handles connections to a daemon at the given URL. + + This class is useful for daemons that don't have the new 'getblock' + RPC call that returns the block in hex, and need the False parameter + for the getblock''' + + async def deserialised_block(self, hex_hash): + '''Return the deserialised block with the given hex hash.''' + return await self._send_single('getblock', (hex_hash, False)) diff --git a/torba/server/db.py b/torba/server/db.py new file mode 100644 index 000000000..d594cbff0 --- /dev/null +++ b/torba/server/db.py @@ -0,0 +1,665 @@ +# Copyright (c) 2016, Neil Booth +# Copyright (c) 2017, the ElectrumX authors +# +# All rights reserved. +# +# See the file "LICENCE" for information about the copyright +# and warranty status of this software. + +'''Interface to the blockchain database.''' + + +import array +import ast +import os +import time +from bisect import bisect_right +from collections import namedtuple +from glob import glob +from struct import pack, unpack + +import attr +from aiorpcx import run_in_thread, sleep + +import torba.server.util as util +from torba.server.hash import hash_to_hex_str, HASHX_LEN +from torba.server.merkle import Merkle, MerkleCache +from torba.server.util import formatted_time +from torba.server.storage import db_class +from torba.server.history import History + + +UTXO = namedtuple("UTXO", "tx_num tx_pos tx_hash height value") + + +@attr.s(slots=True) +class FlushData(object): + height = attr.ib() + tx_count = attr.ib() + headers = attr.ib() + block_tx_hashes = attr.ib() + # The following are flushed to the UTXO DB if undo_infos is not None + undo_infos = attr.ib() + adds = attr.ib() + deletes = attr.ib() + tip = attr.ib() + + +class DB(object): + '''Simple wrapper of the backend database for querying. + + Performs no DB update, though the DB will be cleaned on opening if + it was shutdown uncleanly. 
+ ''' + + DB_VERSIONS = [6] + + class DBError(Exception): + '''Raised on general DB errors generally indicating corruption.''' + + def __init__(self, env): + self.logger = util.class_logger(__name__, self.__class__.__name__) + self.env = env + self.coin = env.coin + + # Setup block header size handlers + if self.coin.STATIC_BLOCK_HEADERS: + self.header_offset = self.coin.static_header_offset + self.header_len = self.coin.static_header_len + else: + self.header_offset = self.dynamic_header_offset + self.header_len = self.dynamic_header_len + + self.logger.info(f'switching current directory to {env.db_dir}') + os.chdir(env.db_dir) + + self.db_class = db_class(self.env.db_engine) + self.history = History() + self.utxo_db = None + self.tx_counts = None + self.last_flush = time.time() + + self.logger.info(f'using {self.env.db_engine} for DB backend') + + # Header merkle cache + self.merkle = Merkle() + self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes) + + self.headers_file = util.LogicalFile('meta/headers', 2, 16000000) + self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000) + self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000) + if not self.coin.STATIC_BLOCK_HEADERS: + self.headers_offsets_file = util.LogicalFile( + 'meta/headers_offsets', 2, 16000000) + + async def _read_tx_counts(self): + if self.tx_counts is not None: + return + # tx_counts[N] has the cumulative number of txs at the end of + # height N. So tx_counts[0] is 1 - the genesis coinbase + size = (self.db_height + 1) * 4 + tx_counts = self.tx_counts_file.read(0, size) + assert len(tx_counts) == size + self.tx_counts = array.array('I', tx_counts) + if self.tx_counts: + assert self.db_tx_count == self.tx_counts[-1] + else: + assert self.db_tx_count == 0 + + async def _open_dbs(self, for_sync, compacting): + assert self.utxo_db is None + + # First UTXO DB + self.utxo_db = self.db_class('utxo', for_sync) + if self.utxo_db.is_new: + self.logger.info('created new database') + self.logger.info('creating metadata directory') + os.mkdir('meta') + with util.open_file('COIN', create=True) as f: + f.write(f'ElectrumX databases and metadata for ' + f'{self.coin.NAME} {self.coin.NET}'.encode()) + if not self.coin.STATIC_BLOCK_HEADERS: + self.headers_offsets_file.write(0, bytes(8)) + else: + self.logger.info(f'opened UTXO DB (for sync: {for_sync})') + self.read_utxo_state() + + # Then history DB + self.utxo_flush_count = self.history.open_db(self.db_class, for_sync, + self.utxo_flush_count, + compacting) + self.clear_excess_undo_info() + + # Read TX counts (requires meta directory) + await self._read_tx_counts() + + async def open_for_compacting(self): + await self._open_dbs(True, True) + + async def open_for_sync(self): + '''Open the databases to sync to the daemon. + + When syncing we want to reserve a lot of open files for the + synchronization. When serving clients we want the open files for + serving network connections. + ''' + await self._open_dbs(True, False) + + async def open_for_serving(self): + '''Open the databases for serving. If they are already open they are + closed first. 
+ ''' + if self.utxo_db: + self.logger.info('closing DBs to re-open for serving') + self.utxo_db.close() + self.history.close_db() + self.utxo_db = None + await self._open_dbs(False, False) + + # Header merkle cache + + async def populate_header_merkle_cache(self): + self.logger.info('populating header merkle cache...') + length = max(1, self.db_height - self.env.reorg_limit) + start = time.time() + await self.header_mc.initialize(length) + elapsed = time.time() - start + self.logger.info(f'header merkle cache populated in {elapsed:.1f}s') + + async def header_branch_and_root(self, length, height): + return await self.header_mc.branch_and_root(length, height) + + # Flushing + def assert_flushed(self, flush_data): + '''Asserts state is fully flushed.''' + assert flush_data.tx_count == self.fs_tx_count == self.db_tx_count + assert flush_data.height == self.fs_height == self.db_height + assert flush_data.tip == self.db_tip + assert not flush_data.headers + assert not flush_data.block_tx_hashes + assert not flush_data.adds + assert not flush_data.deletes + assert not flush_data.undo_infos + self.history.assert_flushed() + + def flush_dbs(self, flush_data, flush_utxos, estimate_txs_remaining): + '''Flush out cached state. History is always flushed; UTXOs are + flushed if flush_utxos.''' + if flush_data.height == self.db_height: + self.assert_flushed(flush_data) + return + + start_time = time.time() + prior_flush = self.last_flush + tx_delta = flush_data.tx_count - self.last_flush_tx_count + + # Flush to file system + self.flush_fs(flush_data) + + # Then history + self.flush_history() + + # Flush state last as it reads the wall time. + with self.utxo_db.write_batch() as batch: + if flush_utxos: + self.flush_utxo_db(batch, flush_data) + self.flush_state(batch) + + # Update and put the wall time again - otherwise we drop the + # time it took to commit the batch + self.flush_state(self.utxo_db) + + elapsed = self.last_flush - start_time + self.logger.info(f'flush #{self.history.flush_count:,d} took ' + f'{elapsed:.1f}s. Height {flush_data.height:,d} ' + f'txs: {flush_data.tx_count:,d} ({tx_delta:+,d})') + + # Catch-up stats + if self.utxo_db.for_sync: + flush_interval = self.last_flush - prior_flush + tx_per_sec_gen = int(flush_data.tx_count / self.wall_time) + tx_per_sec_last = 1 + int(tx_delta / flush_interval) + eta = estimate_txs_remaining() / tx_per_sec_last + self.logger.info(f'tx/sec since genesis: {tx_per_sec_gen:,d}, ' + f'since last flush: {tx_per_sec_last:,d}') + self.logger.info(f'sync time: {formatted_time(self.wall_time)} ' + f'ETA: {formatted_time(eta)}') + + def flush_fs(self, flush_data): + '''Write headers, tx counts and block tx hashes to the filesystem. + + The first height to write is self.fs_height + 1. The FS + metadata is all append-only, so in a crash we just pick up + again from the height stored in the DB. 
+ ''' + prior_tx_count = (self.tx_counts[self.fs_height] + if self.fs_height >= 0 else 0) + assert len(flush_data.block_tx_hashes) == len(flush_data.headers) + assert flush_data.height == self.fs_height + len(flush_data.headers) + assert flush_data.tx_count == (self.tx_counts[-1] if self.tx_counts + else 0) + assert len(self.tx_counts) == flush_data.height + 1 + hashes = b''.join(flush_data.block_tx_hashes) + flush_data.block_tx_hashes.clear() + assert len(hashes) % 32 == 0 + assert len(hashes) // 32 == flush_data.tx_count - prior_tx_count + + # Write the headers, tx counts, and tx hashes + start_time = time.time() + height_start = self.fs_height + 1 + offset = self.header_offset(height_start) + self.headers_file.write(offset, b''.join(flush_data.headers)) + self.fs_update_header_offsets(offset, height_start, flush_data.headers) + flush_data.headers.clear() + + offset = height_start * self.tx_counts.itemsize + self.tx_counts_file.write(offset, + self.tx_counts[height_start:].tobytes()) + offset = prior_tx_count * 32 + self.hashes_file.write(offset, hashes) + + self.fs_height = flush_data.height + self.fs_tx_count = flush_data.tx_count + + if self.utxo_db.for_sync: + elapsed = time.time() - start_time + self.logger.info(f'flushed filesystem data in {elapsed:.2f}s') + + def flush_history(self): + self.history.flush() + + def flush_utxo_db(self, batch, flush_data): + '''Flush the cached DB writes and UTXO set to the batch.''' + # Care is needed because the writes generated by flushing the + # UTXO state may have keys in common with our write cache or + # may be in the DB already. + start_time = time.time() + add_count = len(flush_data.adds) + spend_count = len(flush_data.deletes) // 2 + + # Spends + batch_delete = batch.delete + for key in sorted(flush_data.deletes): + batch_delete(key) + flush_data.deletes.clear() + + # New UTXOs + batch_put = batch.put + for key, value in flush_data.adds.items(): + # suffix = tx_idx + tx_num + hashX = value[:-12] + suffix = key[-2:] + value[-12:-8] + batch_put(b'h' + key[:4] + suffix, hashX) + batch_put(b'u' + hashX + suffix, value[-8:]) + flush_data.adds.clear() + + # New undo information + self.flush_undo_infos(batch_put, flush_data.undo_infos) + flush_data.undo_infos.clear() + + if self.utxo_db.for_sync: + block_count = flush_data.height - self.db_height + tx_count = flush_data.tx_count - self.db_tx_count + elapsed = time.time() - start_time + self.logger.info(f'flushed {block_count:,d} blocks with ' + f'{tx_count:,d} txs, {add_count:,d} UTXO adds, ' + f'{spend_count:,d} spends in ' + f'{elapsed:.1f}s, committing...') + + self.utxo_flush_count = self.history.flush_count + self.db_height = flush_data.height + self.db_tx_count = flush_data.tx_count + self.db_tip = flush_data.tip + + def flush_state(self, batch): + '''Flush chain state to the batch.''' + now = time.time() + self.wall_time += now - self.last_flush + self.last_flush = now + self.last_flush_tx_count = self.fs_tx_count + self.write_utxo_state(batch) + + def flush_backup(self, flush_data, touched): + '''Like flush_dbs() but when backing up. 
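
The byte slicing in flush_utxo_db above maintains two key families per unspent output: b'u' + hashX + tx_idx + tx_num mapping to the output value, and b'h' + the first four bytes of the tx hash + the same suffix mapping back to the hashX. A rough standalone sketch of that packing, assuming little-endian 2-byte tx_idx, 4-byte tx_num and 8-byte value fields; the exact formats are defined elsewhere in this file, so treat this as illustrative only:

    from struct import pack, unpack

    def utxo_entries(hashX, tx_hash, tx_idx, tx_num, value):
        # suffix = tx_idx (2 bytes) + tx_num (4 bytes)
        suffix = pack('<H', tx_idx) + pack('<I', tx_num)
        u_entry = (b'u' + hashX + suffix, pack('<Q', value))   # address -> value
        h_entry = (b'h' + tx_hash[:4] + suffix, hashX)         # outpoint -> hashX
        return u_entry, h_entry

    # the 11-byte hashX and 32-byte tx hash below are just placeholders
    (u_key, u_val), (h_key, h_val) = utxo_entries(
        hashX=b'\x01' * 11, tx_hash=b'\x02' * 32, tx_idx=1, tx_num=42, value=5000)
    assert unpack('<Q', u_val)[0] == 5000
    assert u_key[:1] == b'u' and h_key[:1] == b'h'
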
All UTXOs are flushed.''' + assert not flush_data.headers + assert not flush_data.block_tx_hashes + assert flush_data.height < self.db_height + self.history.assert_flushed() + + start_time = time.time() + tx_delta = flush_data.tx_count - self.last_flush_tx_count + + self.backup_fs(flush_data.height, flush_data.tx_count) + self.history.backup(touched, flush_data.tx_count) + with self.utxo_db.write_batch() as batch: + self.flush_utxo_db(batch, flush_data) + # Flush state last as it reads the wall time. + self.flush_state(batch) + + elapsed = self.last_flush - start_time + self.logger.info(f'backup flush #{self.history.flush_count:,d} took ' + f'{elapsed:.1f}s. Height {flush_data.height:,d} ' + f'txs: {flush_data.tx_count:,d} ({tx_delta:+,d})') + + def fs_update_header_offsets(self, offset_start, height_start, headers): + if self.coin.STATIC_BLOCK_HEADERS: + return + offset = offset_start + offsets = [] + for h in headers: + offset += len(h) + offsets.append(pack("= 0, count >= 0. Reads as many headers as + are available starting at start_height up to count. This + would be zero if start_height is beyond self.db_height, for + example. + + Returns a (binary, n) pair where binary is the concatenated + binary headers, and n is the count of headers returned. + ''' + if start_height < 0 or count < 0: + raise self.DBError(f'{count:,d} headers starting at ' + f'{start_height:,d} not on disk') + + def read_headers(): + # Read some from disk + disk_count = max(0, min(count, self.db_height + 1 - start_height)) + if disk_count: + offset = self.header_offset(start_height) + size = self.header_offset(start_height + disk_count) - offset + return self.headers_file.read(offset, size), disk_count + return b'', 0 + + return await run_in_thread(read_headers) + + def fs_tx_hash(self, tx_num): + '''Return a par (tx_hash, tx_height) for the given tx number. + + If the tx_height is not on disk, returns (None, tx_height).''' + tx_height = bisect_right(self.tx_counts, tx_num) + if tx_height > self.db_height: + tx_hash = None + else: + tx_hash = self.hashes_file.read(tx_num * 32, 32) + return tx_hash, tx_height + + async def fs_block_hashes(self, height, count): + headers_concat, headers_count = await self.read_headers(height, count) + if headers_count != count: + raise self.DBError('only got {:,d} headers starting at {:,d}, not ' + '{:,d}'.format(headers_count, height, count)) + offset = 0 + headers = [] + for n in range(count): + hlen = self.header_len(height + n) + headers.append(headers_concat[offset:offset + hlen]) + offset += hlen + + return [self.coin.header_hash(header) for header in headers] + + async def limited_history(self, hashX, *, limit=1000): + '''Return an unpruned, sorted list of (tx_hash, height) tuples of + confirmed transactions that touched the address, earliest in + the blockchain first. Includes both spending and receiving + transactions. By default returns at most 1000 entries. Set + limit to None to get them all. 
+ ''' + def read_history(): + tx_nums = list(self.history.get_txnums(hashX, limit)) + fs_tx_hash = self.fs_tx_hash + return [fs_tx_hash(tx_num) for tx_num in tx_nums] + + while True: + history = await run_in_thread(read_history) + if all(hash is not None for hash, height in history): + return history + self.logger.warning(f'limited_history: tx hash ' + f'not found (reorg?), retrying...') + await sleep(0.25) + + # -- Undo information + + def min_undo_height(self, max_height): + '''Returns a height from which we should store undo info.''' + return max_height - self.env.reorg_limit + 1 + + def undo_key(self, height): + '''DB key for undo information at the given height.''' + return b'U' + pack('>I', height) + + def read_undo_info(self, height): + '''Read undo information from a file for the current height.''' + return self.utxo_db.get(self.undo_key(height)) + + def flush_undo_infos(self, batch_put, undo_infos): + '''undo_infos is a list of (undo_info, height) pairs.''' + for undo_info, height in undo_infos: + batch_put(self.undo_key(height), b''.join(undo_info)) + + def raw_block_prefix(self): + return 'meta/block' + + def raw_block_path(self, height): + return f'{self.raw_block_prefix()}{height:d}' + + def read_raw_block(self, height): + '''Returns a raw block read from disk. Raises FileNotFoundError + if the block isn't on-disk.''' + with util.open_file(self.raw_block_path(height)) as f: + return f.read(-1) + + def write_raw_block(self, block, height): + '''Write a raw block to disk.''' + with util.open_truncate(self.raw_block_path(height)) as f: + f.write(block) + # Delete old blocks to prevent them accumulating + try: + del_height = self.min_undo_height(height) - 1 + os.remove(self.raw_block_path(del_height)) + except FileNotFoundError: + pass + + def clear_excess_undo_info(self): + '''Clear excess undo info. 
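
Undo information only needs to survive inside the reorg window, so everything below min_undo_height() is prunable. For example, with a reorg limit of 200 and a tip at height 500,000, undo records and raw block files below height 499,801 can go. A small standalone check of the key arithmetic above (REORG_LIMIT stands in for env.reorg_limit):

    from struct import pack, unpack

    REORG_LIMIT = 200      # illustrative; the real value comes from env.reorg_limit

    def min_undo_height(max_height):
        return max_height - REORG_LIMIT + 1

    def undo_key(height):
        return b'U' + pack('>I', height)

    assert min_undo_height(500_000) == 499_801
    key = undo_key(499_801)
    assert key[:1] == b'U' and unpack('>I', key[1:])[0] == 499_801
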
Only most recent N are kept.''' + prefix = b'U' + min_height = self.min_undo_height(self.db_height) + keys = [] + for key, hist in self.utxo_db.iterator(prefix=prefix): + height, = unpack('>I', key[-4:]) + if height >= min_height: + break + keys.append(key) + + if keys: + with self.utxo_db.write_batch() as batch: + for key in keys: + batch.delete(key) + self.logger.info(f'deleted {len(keys):,d} stale undo entries') + + # delete old block files + prefix = self.raw_block_prefix() + paths = [path for path in glob(f'{prefix}[0-9]*') + if len(path) > len(prefix) + and int(path[len(prefix):]) < min_height] + if paths: + for path in paths: + try: + os.remove(path) + except FileNotFoundError: + pass + self.logger.info(f'deleted {len(paths):,d} stale block files') + + # -- UTXO database + + def read_utxo_state(self): + state = self.utxo_db.get(b'state') + if not state: + self.db_height = -1 + self.db_tx_count = 0 + self.db_tip = b'\0' * 32 + self.db_version = max(self.DB_VERSIONS) + self.utxo_flush_count = 0 + self.wall_time = 0 + self.first_sync = True + else: + state = ast.literal_eval(state.decode()) + if not isinstance(state, dict): + raise self.DBError('failed reading state from DB') + self.db_version = state['db_version'] + if self.db_version not in self.DB_VERSIONS: + raise self.DBError('your UTXO DB version is {} but this ' + 'software only handles versions {}' + .format(self.db_version, self.DB_VERSIONS)) + # backwards compat + genesis_hash = state['genesis'] + if isinstance(genesis_hash, bytes): + genesis_hash = genesis_hash.decode() + if genesis_hash != self.coin.GENESIS_HASH: + raise self.DBError('DB genesis hash {} does not match coin {}' + .format(genesis_hash, + self.coin.GENESIS_HASH)) + self.db_height = state['height'] + self.db_tx_count = state['tx_count'] + self.db_tip = state['tip'] + self.utxo_flush_count = state['utxo_flush_count'] + self.wall_time = state['wall_time'] + self.first_sync = state['first_sync'] + + # These are our state as we move ahead of DB state + self.fs_height = self.db_height + self.fs_tx_count = self.db_tx_count + self.last_flush_tx_count = self.fs_tx_count + + # Log some stats + self.logger.info('DB version: {:d}'.format(self.db_version)) + self.logger.info('coin: {}'.format(self.coin.NAME)) + self.logger.info('network: {}'.format(self.coin.NET)) + self.logger.info('height: {:,d}'.format(self.db_height)) + self.logger.info('tip: {}'.format(hash_to_hex_str(self.db_tip))) + self.logger.info('tx count: {:,d}'.format(self.db_tx_count)) + if self.utxo_db.for_sync: + self.logger.info(f'flushing DB cache at {self.env.cache_MB:,d} MB') + if self.first_sync: + self.logger.info('sync time so far: {}' + .format(util.formatted_time(self.wall_time))) + + def write_utxo_state(self, batch): + '''Write (UTXO) state to the batch.''' + state = { + 'genesis': self.coin.GENESIS_HASH, + 'height': self.db_height, + 'tx_count': self.db_tx_count, + 'tip': self.db_tip, + 'utxo_flush_count': self.utxo_flush_count, + 'wall_time': self.wall_time, + 'first_sync': self.first_sync, + 'db_version': self.db_version, + } + batch.put(b'state', repr(state).encode()) + + def set_flush_count(self, count): + self.utxo_flush_count = count + with self.utxo_db.write_batch() as batch: + self.write_utxo_state(batch) + + async def all_utxos(self, hashX): + '''Return all UTXOs for an address sorted in no particular order.''' + def read_utxos(): + utxos = [] + utxos_append = utxos.append + s_unpack = unpack + # Key: b'u' + address_hashX + tx_idx + tx_num + # Value: the UTXO value as a 64-bit unsigned 
integer + prefix = b'u' + hashX + for db_key, db_value in self.utxo_db.iterator(prefix=prefix): + tx_pos, tx_num = s_unpack(' utxo_flush_count: + keys.append(key) + + self.logger.info(f'deleting {len(keys):,d} history entries') + + self.flush_count = utxo_flush_count + with self.db.write_batch() as batch: + for key in keys: + batch.delete(key) + self.write_state(batch) + + self.logger.info('deleted excess history entries') + + def write_state(self, batch): + '''Write state to the history DB.''' + state = { + 'flush_count': self.flush_count, + 'comp_flush_count': self.comp_flush_count, + 'comp_cursor': self.comp_cursor, + 'db_version': self.db_version, + } + # History entries are not prefixed; the suffix \0\0 ensures we + # look similar to other entries and aren't interfered with + batch.put(b'state\0\0', repr(state).encode()) + + def add_unflushed(self, hashXs_by_tx, first_tx_num): + unflushed = self.unflushed + count = 0 + for tx_num, hashXs in enumerate(hashXs_by_tx, start=first_tx_num): + hashXs = set(hashXs) + for hashX in hashXs: + unflushed[hashX].append(tx_num) + count += len(hashXs) + self.unflushed_count += count + + def unflushed_memsize(self): + return len(self.unflushed) * 180 + self.unflushed_count * 4 + + def assert_flushed(self): + assert not self.unflushed + + def flush(self): + start_time = time.time() + self.flush_count += 1 + flush_id = pack_be_uint16(self.flush_count) + unflushed = self.unflushed + + with self.db.write_batch() as batch: + for hashX in sorted(unflushed): + key = hashX + flush_id + batch.put(key, unflushed[hashX].tobytes()) + self.write_state(batch) + + count = len(unflushed) + unflushed.clear() + self.unflushed_count = 0 + + if self.db.for_sync: + elapsed = time.time() - start_time + self.logger.info(f'flushed history in {elapsed:.1f}s ' + f'for {count:,d} addrs') + + def backup(self, hashXs, tx_count): + # Not certain this is needed, but it doesn't hurt + self.flush_count += 1 + nremoves = 0 + bisect_left = bisect.bisect_left + + with self.db.write_batch() as batch: + for hashX in sorted(hashXs): + deletes = [] + puts = {} + for key, hist in self.db.iterator(prefix=hashX, reverse=True): + a = array.array('I') + a.frombytes(hist) + # Remove all history entries >= tx_count + idx = bisect_left(a, tx_count) + nremoves += len(a) - idx + if idx > 0: + puts[key] = a[:idx].tobytes() + break + deletes.append(key) + + for key in deletes: + batch.delete(key) + for key, value in puts.items(): + batch.put(key, value) + self.write_state(batch) + + self.logger.info(f'backing up removed {nremoves:,d} history entries') + + def get_txnums(self, hashX, limit=1000): + '''Generator that returns an unpruned, sorted list of tx_nums in the + history of a hashX. Includes both spending and receiving + transactions. By default yields at most 1000 entries. Set + limit to None to get them all. ''' + limit = util.resolve_limit(limit) + for key, hist in self.db.iterator(prefix=hashX): + a = array.array('I') + a.frombytes(hist) + for tx_num in a: + if limit == 0: + return + yield tx_num + limit -= 1 + + # + # History compaction + # + + # comp_cursor is a cursor into compaction progress. + # -1: no compaction in progress + # 0-65535: Compaction in progress; all prefixes < comp_cursor have + # been compacted, and later ones have not. + # 65536: compaction complete in-memory but not flushed + # + # comp_flush_count applies during compaction, and is a flush count + # for history with prefix < comp_cursor. flush_count applies + # to still uncompacted history. 
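
An on-disk history row is therefore just hashX plus a 2-byte big-endian flush counter, and its value is the raw bytes of an array('I') of transaction numbers. A standalone sketch of writing and reading one such row (struct.pack('>H', ...) stands in for pack_be_uint16, and the 11-byte hashX is only a placeholder):

    import array
    from struct import pack

    def history_row(hashX, flush_count, tx_nums):
        key = hashX + pack('>H', flush_count)          # hashX + big-endian flush id
        value = array.array('I', tx_nums).tobytes()    # packed uint32 tx numbers
        return key, value

    def read_history_row(value):
        a = array.array('I')
        a.frombytes(value)
        return list(a)

    key, value = history_row(b'\x01' * 11, 7, [100, 250, 2600])
    assert read_history_row(value) == [100, 250, 2600]
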
It is -1 when no compaction is + # taking place. Key suffixes up to and including comp_flush_count + # are used, so a parallel history flush must first increment this + # + # When compaction is complete and the final flush takes place, + # flush_count is reset to comp_flush_count, and comp_flush_count to -1 + + def _flush_compaction(self, cursor, write_items, keys_to_delete): + '''Flush a single compaction pass as a batch.''' + # Update compaction state + if cursor == 65536: + self.flush_count = self.comp_flush_count + self.comp_cursor = -1 + self.comp_flush_count = -1 + else: + self.comp_cursor = cursor + + # History DB. Flush compacted history and updated state + with self.db.write_batch() as batch: + # Important: delete first! The keyspace may overlap. + for key in keys_to_delete: + batch.delete(key) + for key, value in write_items: + batch.put(key, value) + self.write_state(batch) + + def _compact_hashX(self, hashX, hist_map, hist_list, + write_items, keys_to_delete): + '''Compres history for a hashX. hist_list is an ordered list of + the histories to be compressed.''' + # History entries (tx numbers) are 4 bytes each. Distribute + # over rows of up to 50KB in size. A fixed row size means + # future compactions will not need to update the first N - 1 + # rows. + max_row_size = self.max_hist_row_entries * 4 + full_hist = b''.join(hist_list) + nrows = (len(full_hist) + max_row_size - 1) // max_row_size + if nrows > 4: + self.logger.info('hashX {} is large: {:,d} entries across ' + '{:,d} rows' + .format(hash_to_hex_str(hashX), + len(full_hist) // 4, nrows)) + + # Find what history needs to be written, and what keys need to + # be deleted. Start by assuming all keys are to be deleted, + # and then remove those that are the same on-disk as when + # compacted. + write_size = 0 + keys_to_delete.update(hist_map) + for n, chunk in enumerate(util.chunks(full_hist, max_row_size)): + key = hashX + pack_be_uint16(n) + if hist_map.get(key) == chunk: + keys_to_delete.remove(key) + else: + write_items.append((key, chunk)) + write_size += len(chunk) + + assert n + 1 == nrows + self.comp_flush_count = max(self.comp_flush_count, n) + + return write_size + + def _compact_prefix(self, prefix, write_items, keys_to_delete): + '''Compact all history entries for hashXs beginning with the + given prefix. Update keys_to_delete and write.''' + prior_hashX = None + hist_map = {} + hist_list = [] + + key_len = HASHX_LEN + 2 + write_size = 0 + for key, hist in self.db.iterator(prefix=prefix): + # Ignore non-history entries + if len(key) != key_len: + continue + hashX = key[:-2] + if hashX != prior_hashX and prior_hashX: + write_size += self._compact_hashX(prior_hashX, hist_map, + hist_list, write_items, + keys_to_delete) + hist_map.clear() + hist_list.clear() + prior_hashX = hashX + hist_map[key] = hist + hist_list.append(hist) + + if prior_hashX: + write_size += self._compact_hashX(prior_hashX, hist_map, hist_list, + write_items, keys_to_delete) + return write_size + + def _compact_history(self, limit): + '''Inner loop of history compaction. Loops until limit bytes have + been processed. 
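
The compaction pass above concatenates every row of a hashX's history and re-chunks it into fixed-size rows keyed by hashX plus a row number, so a later pass only ever needs to rewrite the final, partial row. A rough standalone sketch of that re-chunking, using the 50 KB row size mentioned above and plain slicing in place of util.chunks:

    from struct import pack

    MAX_ROW_SIZE = 50_000          # bytes: 12,500 four-byte history entries

    def compact_rows(hashX, hist_chunks):
        full_hist = b''.join(hist_chunks)
        return [(hashX + pack('>H', n), full_hist[start:start + MAX_ROW_SIZE])
                for n, start in enumerate(range(0, len(full_hist), MAX_ROW_SIZE))]

    rows = compact_rows(b'\x02' * 11, [b'\x00' * 60_000, b'\x00' * 30_000])
    assert len(rows) == 2                              # 90 KB -> one full row, one partial
    assert len(rows[0][1]) == 50_000 and len(rows[1][1]) == 40_000
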
+ ''' + keys_to_delete = set() + write_items = [] # A list of (key, value) pairs + write_size = 0 + + # Loop over 2-byte prefixes + cursor = self.comp_cursor + while write_size < limit and cursor < 65536: + prefix = pack_be_uint16(cursor) + write_size += self._compact_prefix(prefix, write_items, + keys_to_delete) + cursor += 1 + + max_rows = self.comp_flush_count + 1 + self._flush_compaction(cursor, write_items, keys_to_delete) + + self.logger.info('history compaction: wrote {:,d} rows ({:.1f} MB), ' + 'removed {:,d} rows, largest: {:,d}, {:.1f}% complete' + .format(len(write_items), write_size / 1000000, + len(keys_to_delete), max_rows, + 100 * cursor / 65536)) + return write_size + + def _cancel_compaction(self): + if self.comp_cursor != -1: + self.logger.warning('cancelling in-progress history compaction') + self.comp_flush_count = -1 + self.comp_cursor = -1 diff --git a/torba/server/mempool.py b/torba/server/mempool.py new file mode 100644 index 000000000..765e66a4f --- /dev/null +++ b/torba/server/mempool.py @@ -0,0 +1,365 @@ +# Copyright (c) 2016-2018, Neil Booth +# +# All rights reserved. +# +# See the file "LICENCE" for information about the copyright +# and warranty status of this software. + +'''Mempool handling.''' + +import itertools +import time +from abc import ABC, abstractmethod +from asyncio import Lock +from collections import defaultdict + +import attr +from aiorpcx import TaskGroup, run_in_thread, sleep + +from torba.server.hash import hash_to_hex_str, hex_str_to_hash +from torba.server.util import class_logger, chunks +from torba.server.db import UTXO + + +@attr.s(slots=True) +class MemPoolTx(object): + prevouts = attr.ib() + # A pair is a (hashX, value) tuple + in_pairs = attr.ib() + out_pairs = attr.ib() + fee = attr.ib() + size = attr.ib() + + +@attr.s(slots=True) +class MemPoolTxSummary(object): + hash = attr.ib() + fee = attr.ib() + has_unconfirmed_inputs = attr.ib() + + +class MemPoolAPI(ABC): + '''A concrete instance of this class is passed to the MemPool object + and used by it to query DB and blockchain state.''' + + @abstractmethod + async def height(self): + '''Query bitcoind for its height.''' + + @abstractmethod + def cached_height(self): + '''Return the height of bitcoind the last time it was queried, + for any reason, without actually querying it. + ''' + + @abstractmethod + async def mempool_hashes(self): + '''Query bitcoind for the hashes of all transactions in its + mempool, returned as a list.''' + + @abstractmethod + async def raw_transactions(self, hex_hashes): + '''Query bitcoind for the serialized raw transactions with the given + hashes. Missing transactions are returned as None. + + hex_hashes is an iterable of hexadecimal hash strings.''' + + @abstractmethod + async def lookup_utxos(self, prevouts): + '''Return a list of (hashX, value) pairs each prevout if unspent, + otherwise return None if spent or not found. + + prevouts - an iterable of (hash, index) pairs + ''' + + @abstractmethod + async def on_mempool(self, touched, height): + '''Called each time the mempool is synchronized. touched is a set of + hashXs touched since the previous call. height is the + daemon's height at the time the mempool was obtained.''' + + +class MemPool(object): + '''Representation of the daemon's mempool. + + coin - a coin class from coins.py + api - an object implementing MemPoolAPI + + Updated regularly in caught-up state. Goal is to enable efficient + response to the calls in the external interface. 
To that end we + maintain the following maps: + + tx: tx_hash -> MemPoolTx + hashXs: hashX -> set of all hashes of txs touching the hashX + ''' + + def __init__(self, coin, api, refresh_secs=5.0, log_status_secs=120.0): + assert isinstance(api, MemPoolAPI) + self.coin = coin + self.api = api + self.logger = class_logger(__name__, self.__class__.__name__) + self.txs = {} + self.hashXs = defaultdict(set) # None can be a key + self.cached_compact_histogram = [] + self.refresh_secs = refresh_secs + self.log_status_secs = log_status_secs + # Prevents mempool refreshes during fee histogram calculation + self.lock = Lock() + + async def _logging(self, synchronized_event): + '''Print regular logs of mempool stats.''' + self.logger.info('beginning processing of daemon mempool. ' + 'This can take some time...') + start = time.time() + await synchronized_event.wait() + elapsed = time.time() - start + self.logger.info(f'synced in {elapsed:.2f}s') + while True: + self.logger.info(f'{len(self.txs):,d} txs ' + f'touching {len(self.hashXs):,d} addresses') + await sleep(self.log_status_secs) + await synchronized_event.wait() + + async def _refresh_histogram(self, synchronized_event): + while True: + await synchronized_event.wait() + async with self.lock: + # Threaded as can be expensive + await run_in_thread(self._update_histogram, 100_000) + await sleep(self.coin.MEMPOOL_HISTOGRAM_REFRESH_SECS) + + def _update_histogram(self, bin_size): + # Build a histogram by fee rate + histogram = defaultdict(int) + for tx in self.txs.values(): + histogram[tx.fee // tx.size] += tx.size + + # Now compact it. For efficiency, get_fees returns a + # compact histogram with variable bin size. The compact + # histogram is an array of (fee_rate, vsize) values. + # vsize_n is the cumulative virtual size of mempool + # transactions with a fee rate in the interval + # [rate_(n-1), rate_n)], and rate_(n-1) > rate_n. + # Intervals are chosen to create tranches containing at + # least 100kb of transactions + compact = [] + cum_size = 0 + r = 0 # ? + for fee_rate, size in sorted(histogram.items(), reverse=True): + cum_size += size + if cum_size + r > bin_size: + compact.append((fee_rate, cum_size)) + r += cum_size - bin_size + cum_size = 0 + bin_size *= 1.1 + self.logger.info(f'compact fee histogram: {compact}') + self.cached_compact_histogram = compact + + def _accept_transactions(self, tx_map, utxo_map, touched): + '''Accept transactions in tx_map to the mempool if all their inputs + can be found in the existing mempool or a utxo_map from the + DB. + + Returns an (unprocessed tx_map, unspent utxo_map) pair. 
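
Put differently, the cached histogram ends up as a list of (fee_rate, vsize) pairs, highest fee rate first, with each entry covering roughly 100 kB or more of mempool weight. A simplified standalone sketch of the same idea, dropping the growing bin size and carry-over correction used above:

    from collections import defaultdict

    def compact_fee_histogram(txs, bin_size=100_000):
        # txs is an iterable of (fee, size) pairs
        histogram = defaultdict(int)
        for fee, size in txs:
            histogram[fee // size] += size
        compact, cum_size = [], 0
        for fee_rate, size in sorted(histogram.items(), reverse=True):
            cum_size += size
            if cum_size > bin_size:
                compact.append((fee_rate, cum_size))
                cum_size = 0
        return compact

    # two fee-rate bands that together exceed 100 kB, plus one cheap band on its own
    txs = [(200_000, 400)] * 200 + [(50_000, 250)] * 200 + [(1_000, 500)] * 300
    assert compact_fee_histogram(txs) == [(200, 130_000), (2, 150_000)]
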
+ ''' + hashXs = self.hashXs + txs = self.txs + + deferred = {} + unspent = set(utxo_map) + # Try to find all prevouts so we can accept the TX + for hash, tx in tx_map.items(): + in_pairs = [] + try: + for prevout in tx.prevouts: + utxo = utxo_map.get(prevout) + if not utxo: + prev_hash, prev_index = prevout + # Raises KeyError if prev_hash is not in txs + utxo = txs[prev_hash].out_pairs[prev_index] + in_pairs.append(utxo) + except KeyError: + deferred[hash] = tx + continue + + # Spend the prevouts + unspent.difference_update(tx.prevouts) + + # Save the in_pairs, compute the fee and accept the TX + tx.in_pairs = tuple(in_pairs) + # Avoid negative fees if dealing with generation-like transactions + # because some in_parts would be missing + tx.fee = max(0, (sum(v for _, v in tx.in_pairs) - + sum(v for _, v in tx.out_pairs))) + txs[hash] = tx + + for hashX, value in itertools.chain(tx.in_pairs, tx.out_pairs): + touched.add(hashX) + hashXs[hashX].add(hash) + + return deferred, {prevout: utxo_map[prevout] for prevout in unspent} + + async def _refresh_hashes(self, synchronized_event): + '''Refresh our view of the daemon's mempool.''' + while True: + height = self.api.cached_height() + hex_hashes = await self.api.mempool_hashes() + if height != await self.api.height(): + continue + hashes = set(hex_str_to_hash(hh) for hh in hex_hashes) + async with self.lock: + touched = await self._process_mempool(hashes) + synchronized_event.set() + synchronized_event.clear() + await self.api.on_mempool(touched, height) + await sleep(self.refresh_secs) + + async def _process_mempool(self, all_hashes): + # Re-sync with the new set of hashes + txs = self.txs + hashXs = self.hashXs + touched = set() + + # First handle txs that have disappeared + for tx_hash in set(txs).difference(all_hashes): + tx = txs.pop(tx_hash) + tx_hashXs = set(hashX for hashX, value in tx.in_pairs) + tx_hashXs.update(hashX for hashX, value in tx.out_pairs) + for hashX in tx_hashXs: + hashXs[hashX].remove(tx_hash) + if not hashXs[hashX]: + del hashXs[hashX] + touched.update(tx_hashXs) + + # Process new transactions + new_hashes = list(all_hashes.difference(txs)) + if new_hashes: + group = TaskGroup() + for hashes in chunks(new_hashes, 200): + coro = self._fetch_and_accept(hashes, all_hashes, touched) + await group.spawn(coro) + tx_map = {} + utxo_map = {} + async for task in group: + deferred, unspent = task.result() + tx_map.update(deferred) + utxo_map.update(unspent) + + prior_count = 0 + # FIXME: this is not particularly efficient + while tx_map and len(tx_map) != prior_count: + prior_count = len(tx_map) + tx_map, utxo_map = self._accept_transactions(tx_map, utxo_map, + touched) + if tx_map: + self.logger.info(f'{len(tx_map)} txs dropped') + + return touched + + async def _fetch_and_accept(self, hashes, all_hashes, touched): + '''Fetch a list of mempool transactions.''' + hex_hashes_iter = (hash_to_hex_str(hash) for hash in hashes) + raw_txs = await self.api.raw_transactions(hex_hashes_iter) + + def deserialize_txs(): # This function is pure + to_hashX = self.coin.hashX_from_script + deserializer = self.coin.DESERIALIZER + + txs = {} + for hash, raw_tx in zip(hashes, raw_txs): + # The daemon may have evicted the tx from its + # mempool or it may have gotten in a block + if not raw_tx: + continue + tx, tx_size = deserializer(raw_tx).read_tx_and_vsize() + # Convert the inputs and outputs into (hashX, value) pairs + # Drop generation-like inputs from MemPoolTx.prevouts + txin_pairs = tuple((txin.prev_hash, txin.prev_idx) + for txin in 
tx.inputs + if not txin.is_generation()) + txout_pairs = tuple((to_hashX(txout.pk_script), txout.value) + for txout in tx.outputs) + txs[hash] = MemPoolTx(txin_pairs, None, txout_pairs, + 0, tx_size) + return txs + + # Thread this potentially slow operation so as not to block + tx_map = await run_in_thread(deserialize_txs) + + # Determine all prevouts not in the mempool, and fetch the + # UTXO information from the database. Failed prevout lookups + # return None - concurrent database updates happen - which is + # relied upon by _accept_transactions. Ignore prevouts that are + # generation-like. + prevouts = tuple(prevout for tx in tx_map.values() + for prevout in tx.prevouts + if prevout[0] not in all_hashes) + utxos = await self.api.lookup_utxos(prevouts) + utxo_map = {prevout: utxo for prevout, utxo in zip(prevouts, utxos)} + + return self._accept_transactions(tx_map, utxo_map, touched) + + # + # External interface + # + + async def keep_synchronized(self, synchronized_event): + '''Keep the mempool synchronized with the daemon.''' + async with TaskGroup() as group: + await group.spawn(self._refresh_hashes(synchronized_event)) + await group.spawn(self._refresh_histogram(synchronized_event)) + await group.spawn(self._logging(synchronized_event)) + + async def balance_delta(self, hashX): + '''Return the unconfirmed amount in the mempool for hashX. + + Can be positive or negative. + ''' + value = 0 + if hashX in self.hashXs: + for hash in self.hashXs[hashX]: + tx = self.txs[hash] + value -= sum(v for h168, v in tx.in_pairs if h168 == hashX) + value += sum(v for h168, v in tx.out_pairs if h168 == hashX) + return value + + async def compact_fee_histogram(self): + '''Return a compact fee histogram of the current mempool.''' + return self.cached_compact_histogram + + async def potential_spends(self, hashX): + '''Return a set of (prev_hash, prev_idx) pairs from mempool + transactions that touch hashX. + + None, some or all of these may be spends of the hashX, but all + actual spends of it (in the DB or mempool) will be included. + ''' + result = set() + for tx_hash in self.hashXs.get(hashX, ()): + tx = self.txs[tx_hash] + result.update(tx.prevouts) + return result + + async def transaction_summaries(self, hashX): + '''Return a list of MemPoolTxSummary objects for the hashX.''' + result = [] + for tx_hash in self.hashXs.get(hashX, ()): + tx = self.txs[tx_hash] + has_ui = any(hash in self.txs for hash, idx in tx.prevouts) + result.append(MemPoolTxSummary(tx_hash, tx.fee, has_ui)) + return result + + async def unordered_UTXOs(self, hashX): + '''Return an unordered list of UTXO named tuples from mempool + transactions that pay to hashX. + + This does not consider if any other mempool transactions spend + the outputs. + ''' + utxos = [] + for tx_hash in self.hashXs.get(hashX, ()): + tx = self.txs.get(tx_hash) + for pos, (hX, value) in enumerate(tx.out_pairs): + if hX == hashX: + utxos.append(UTXO(-1, pos, tx_hash, 0, value)) + return utxos diff --git a/torba/server/merkle.py b/torba/server/merkle.py new file mode 100644 index 000000000..bdb0a90a9 --- /dev/null +++ b/torba/server/merkle.py @@ -0,0 +1,254 @@ +# Copyright (c) 2018, Neil Booth +# +# All rights reserved. 
+# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# and warranty status of this software. + +'''Merkle trees, branches, proofs and roots.''' + +from math import ceil, log + +from aiorpcx import Event + +from torba.server.hash import double_sha256 + + +class Merkle(object): + '''Perform merkle tree calculations on binary hashes using a given hash + function. + + If the hash count is not even, the final hash is repeated when + calculating the next merkle layer up the tree. + ''' + + def __init__(self, hash_func=double_sha256): + self.hash_func = hash_func + + def tree_depth(self, hash_count): + return self.branch_length(hash_count) + 1 + + def branch_length(self, hash_count): + '''Return the length of a merkle branch given the number of hashes.''' + if not isinstance(hash_count, int): + raise TypeError('hash_count must be an integer') + if hash_count < 1: + raise ValueError('hash_count must be at least 1') + return ceil(log(hash_count, 2)) + + def branch_and_root(self, hashes, index, length=None): + '''Return a (merkle branch, merkle_root) pair given hashes, and the + index of one of those hashes. + ''' + hashes = list(hashes) + if not isinstance(index, int): + raise TypeError('index must be an integer') + # This also asserts hashes is not empty + if not 0 <= index < len(hashes): + raise ValueError('index out of range') + natural_length = self.branch_length(len(hashes)) + if length is None: + length = natural_length + else: + if not isinstance(length, int): + raise TypeError('length must be an integer') + if length < natural_length: + raise ValueError('length out of range') + + hash_func = self.hash_func + branch = [] + for _ in range(length): + if len(hashes) & 1: + hashes.append(hashes[-1]) + branch.append(hashes[index ^ 1]) + index >>= 1 + hashes = [hash_func(hashes[n] + hashes[n + 1]) + for n in range(0, len(hashes), 2)] + + return branch, hashes[0] + + def root(self, hashes, length=None): + '''Return the merkle root of a non-empty iterable of binary hashes.''' + branch, root = self.branch_and_root(hashes, 0, length) + return root + + def root_from_proof(self, hash, branch, index): + '''Return the merkle root given a hash, a merkle branch to it, and + its index in the hashes array. + + branch is an iterable sorted deepest to shallowest. If the + returned root is the expected value then the merkle proof is + verified. + + The caller should have confirmed the length of the branch with + branch_length(). 
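
branch_and_root() and root_from_proof() above are designed to round-trip: a branch produced for one leaf always hashes back to the same root. A short usage example against this class (the five leaf hashes are arbitrary):

    from torba.server.hash import double_sha256
    from torba.server.merkle import Merkle

    merkle = Merkle()
    leaves = [double_sha256(bytes([n])) for n in range(5)]

    branch, root = merkle.branch_and_root(leaves, 3)
    assert len(branch) == merkle.branch_length(len(leaves))   # 3 levels for 5 leaves
    assert merkle.root(leaves) == root
    assert merkle.root_from_proof(leaves[3], branch, 3) == root
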
Unfortunately this is not easily done for + bitcoin transactions as the number of transactions in a block + is unknown to an SPV client. + ''' + hash_func = self.hash_func + for elt in branch: + if index & 1: + hash = hash_func(elt + hash) + else: + hash = hash_func(hash + elt) + index >>= 1 + if index: + raise ValueError('index out of range for branch') + return hash + + def level(self, hashes, depth_higher): + '''Return a level of the merkle tree of hashes the given depth + higher than the bottom row of the original tree.''' + size = 1 << depth_higher + root = self.root + return [root(hashes[n: n + size], depth_higher) + for n in range(0, len(hashes), size)] + + def branch_and_root_from_level(self, level, leaf_hashes, index, + depth_higher): + '''Return a (merkle branch, merkle_root) pair when a merkle-tree has a + level cached. + + To maximally reduce the amount of data hashed in computing a + markle branch, cache a tree of depth N at level N // 2. + + level is a list of hashes in the middle of the tree (returned + by level()) + + leaf_hashes are the leaves needed to calculate a partial branch + up to level. + + depth_higher is how much higher level is than the leaves of the tree + + index is the index in the full list of hashes of the hash whose + merkle branch we want. + ''' + if not isinstance(level, list): + raise TypeError("level must be a list") + if not isinstance(leaf_hashes, list): + raise TypeError("leaf_hashes must be a list") + leaf_index = (index >> depth_higher) << depth_higher + leaf_branch, leaf_root = self.branch_and_root( + leaf_hashes, index - leaf_index, depth_higher) + index >>= depth_higher + level_branch, root = self.branch_and_root(level, index) + # Check last so that we know index is in-range + if leaf_root != level[index]: + raise ValueError('leaf hashes inconsistent with level') + return leaf_branch + level_branch, root + + +class MerkleCache(object): + '''A cache to calculate merkle branches efficiently.''' + + def __init__(self, merkle, source_func): + '''Initialise a cache hashes taken from source_func: + + async def source_func(index, count): + ... + ''' + self.merkle = merkle + self.source_func = source_func + self.length = 0 + self.depth_higher = 0 + self.initialized = Event() + + def _segment_length(self): + return 1 << self.depth_higher + + def _leaf_start(self, index): + '''Given a level's depth higher and a hash index, return the leaf + index and leaf hash count needed to calculate a merkle branch. + ''' + depth_higher = self.depth_higher + return (index >> depth_higher) << depth_higher + + def _level(self, hashes): + return self.merkle.level(hashes, self.depth_higher) + + async def _extend_to(self, length): + '''Extend the length of the cache if necessary.''' + if length <= self.length: + return + # Start from the beginning of any final partial segment. 
+ # Retain the value of depth_higher; in practice this is fine + start = self._leaf_start(self.length) + hashes = await self.source_func(start, length - start) + self.level[start >> self.depth_higher:] = self._level(hashes) + self.length = length + + async def _level_for(self, length): + '''Return a (level_length, final_hash) pair for a truncation + of the hashes to the given length.''' + if length == self.length: + return self.level + level = self.level[:length >> self.depth_higher] + leaf_start = self._leaf_start(length) + count = min(self._segment_length(), length - leaf_start) + hashes = await self.source_func(leaf_start, count) + level += self._level(hashes) + return level + + async def initialize(self, length): + '''Call to initialize the cache to a source of given length.''' + self.length = length + self.depth_higher = self.merkle.tree_depth(length) // 2 + self.level = self._level(await self.source_func(0, length)) + self.initialized.set() + + def truncate(self, length): + '''Truncate the cache so it covers no more than length underlying + hashes.''' + if not isinstance(length, int): + raise TypeError('length must be an integer') + if length <= 0: + raise ValueError('length must be positive') + if length >= self.length: + return + length = self._leaf_start(length) + self.length = length + self.level[length >> self.depth_higher:] = [] + + async def branch_and_root(self, length, index): + '''Return a merkle branch and root. Length is the number of + hashes used to calculate the merkle root, index is the position + of the hash to calculate the branch of. + + index must be less than length, which must be at least 1.''' + if not isinstance(length, int): + raise TypeError('length must be an integer') + if not isinstance(index, int): + raise TypeError('index must be an integer') + if length <= 0: + raise ValueError('length must be positive') + if index >= length: + raise ValueError('index must be less than length') + await self.initialized.wait() + await self._extend_to(length) + leaf_start = self._leaf_start(index) + count = min(self._segment_length(), length - leaf_start) + leaf_hashes = await self.source_func(leaf_start, count) + if length < self._segment_length(): + return self.merkle.branch_and_root(leaf_hashes, index) + level = await self._level_for(length) + return self.merkle.branch_and_root_from_level( + level, leaf_hashes, index, self.depth_higher) diff --git a/torba/server/peer.py b/torba/server/peer.py new file mode 100644 index 000000000..7b392acbe --- /dev/null +++ b/torba/server/peer.py @@ -0,0 +1,301 @@ +# Copyright (c) 2017, Neil Booth +# +# All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +'''Representation of a peer server.''' + +from ipaddress import ip_address + +from torba.server.util import cachedproperty +import torba.server.util as util + +from typing import Dict + + +class Peer(object): + + # Protocol version + ATTRS = ('host', 'features', + # metadata + 'source', 'ip_addr', + 'last_good', 'last_try', 'try_count') + FEATURES = ('pruning', 'server_version', 'protocol_min', 'protocol_max', + 'ssl_port', 'tcp_port') + # This should be set by the application + DEFAULT_PORTS: Dict[str, int] = {} + + def __init__(self, host, features, source='unknown', ip_addr=None, + last_good=0, last_try=0, try_count=0): + '''Create a peer given a host name (or IP address as a string), + a dictionary of features, and a record of the source.''' + assert isinstance(host, str) + assert isinstance(features, dict) + assert host in features.get('hosts', {}) + self.host = host + self.features = features.copy() + # Canonicalize / clean-up + for feature in self.FEATURES: + self.features[feature] = getattr(self, feature) + # Metadata + self.source = source + self.ip_addr = ip_addr + # last_good represents the last connection that was + # successful *and* successfully verified, at which point + # try_count is set to 0. Failure to connect or failure to + # verify increment the try_count. + self.last_good = last_good + self.last_try = last_try + self.try_count = try_count + # Transient, non-persisted metadata + self.bad = False + self.other_port_pairs = set() + + @classmethod + def peers_from_features(cls, features, source): + peers = [] + if isinstance(features, dict): + hosts = features.get('hosts') + if isinstance(hosts, dict): + peers = [Peer(host, features, source=source) + for host in hosts if isinstance(host, str)] + return peers + + @classmethod + def deserialize(cls, item): + '''Deserialize from a dictionary.''' + return cls(**item) + + def matches(self, peers): + '''Return peers whose host matches our hostname or IP address. + Additionally include all peers whose IP address matches our + hostname if that is an IP address. + ''' + candidates = (self.host.lower(), self.ip_addr) + return [peer for peer in peers + if peer.host.lower() in candidates + or peer.ip_addr == self.host] + + def __str__(self): + return self.host + + def update_features(self, features): + '''Update features in-place.''' + try: + tmp = Peer(self.host, features) + except Exception: + pass + else: + self.update_features_from_peer(tmp) + + def update_features_from_peer(self, peer): + if peer != self: + self.features = peer.features + for feature in self.FEATURES: + setattr(self, feature, getattr(peer, feature)) + + def connection_port_pairs(self): + '''Return a list of (kind, port) pairs to try when making a + connection.''' + # Use a list not a set - it's important to try the registered + # ports first. 
+ pairs = [('SSL', self.ssl_port), ('TCP', self.tcp_port)] + while self.other_port_pairs: + pairs.append(self.other_port_pairs.pop()) + return [pair for pair in pairs if pair[1]] + + def mark_bad(self): + '''Mark as bad to avoid reconnects but also to remember for a + while.''' + self.bad = True + + def check_ports(self, other): + '''Remember differing ports in case server operator changed them + or removed one.''' + if other.ssl_port != self.ssl_port: + self.other_port_pairs.add(('SSL', other.ssl_port)) + if other.tcp_port != self.tcp_port: + self.other_port_pairs.add(('TCP', other.tcp_port)) + return bool(self.other_port_pairs) + + @cachedproperty + def is_tor(self): + return self.host.endswith('.onion') + + @cachedproperty + def is_valid(self): + ip = self.ip_address + if ip: + return ((ip.is_global or ip.is_private) + and not (ip.is_multicast or ip.is_unspecified)) + return util.is_valid_hostname(self.host) + + @cachedproperty + def is_public(self): + ip = self.ip_address + if ip: + return self.is_valid and not ip.is_private + else: + return self.is_valid and self.host != 'localhost' + + @cachedproperty + def ip_address(self): + '''The host as a python ip_address object, or None.''' + try: + return ip_address(self.host) + except ValueError: + return None + + def bucket(self): + if self.is_tor: + return 'onion' + if not self.ip_addr: + return '' + return tuple(self.ip_addr.split('.')[:2]) + + def serialize(self): + '''Serialize to a dictionary.''' + return {attr: getattr(self, attr) for attr in self.ATTRS} + + def _port(self, key): + hosts = self.features.get('hosts') + if isinstance(hosts, dict): + host = hosts.get(self.host) + port = self._integer(key, host) + if port and 0 < port < 65536: + return port + return None + + def _integer(self, key, d=None): + d = d or self.features + result = d.get(key) if isinstance(d, dict) else None + if isinstance(result, str): + try: + result = int(result) + except ValueError: + pass + return result if isinstance(result, int) else None + + def _string(self, key): + result = self.features.get(key) + return result if isinstance(result, str) else None + + @cachedproperty + def genesis_hash(self): + '''Returns None if no SSL port, otherwise the port as an integer.''' + return self._string('genesis_hash') + + @cachedproperty + def ssl_port(self): + '''Returns None if no SSL port, otherwise the port as an integer.''' + return self._port('ssl_port') + + @cachedproperty + def tcp_port(self): + '''Returns None if no TCP port, otherwise the port as an integer.''' + return self._port('tcp_port') + + @cachedproperty + def server_version(self): + '''Returns the server version as a string if known, otherwise None.''' + return self._string('server_version') + + @cachedproperty + def pruning(self): + '''Returns the pruning level as an integer. 
None indicates no + pruning.''' + pruning = self._integer('pruning') + if pruning and pruning > 0: + return pruning + return None + + def _protocol_version_string(self, key): + version_str = self.features.get(key) + ptuple = util.protocol_tuple(version_str) + return util.version_string(ptuple) + + @cachedproperty + def protocol_min(self): + '''Minimum protocol version as a string, e.g., 1.0''' + return self._protocol_version_string('protocol_min') + + @cachedproperty + def protocol_max(self): + '''Maximum protocol version as a string, e.g., 1.1''' + return self._protocol_version_string('protocol_max') + + def to_tuple(self): + '''The tuple ((ip, host, details) expected in response + to a peers subscription.''' + details = self.real_name().split()[1:] + return (self.ip_addr or self.host, self.host, details) + + def real_name(self): + '''Real name of this peer as used on IRC.''' + def port_text(letter, port): + if port == self.DEFAULT_PORTS.get(letter): + return letter + else: + return letter + str(port) + + parts = [self.host, 'v' + self.protocol_max] + if self.pruning: + parts.append('p{:d}'.format(self.pruning)) + for letter, port in (('s', self.ssl_port), ('t', self.tcp_port)): + if port: + parts.append(port_text(letter, port)) + return ' '.join(parts) + + @classmethod + def from_real_name(cls, real_name, source): + '''Real name is a real name as on IRC, such as + + "erbium1.sytes.net v1.0 s t" + + Returns an instance of this Peer class. + ''' + host = 'nohost' + features = {} + ports = {} + for n, part in enumerate(real_name.split()): + if n == 0: + host = part + continue + if part[0] in ('s', 't'): + if len(part) == 1: + port = cls.DEFAULT_PORTS[part[0]] + else: + port = part[1:] + if part[0] == 's': + ports['ssl_port'] = port + else: + ports['tcp_port'] = port + elif part[0] == 'v': + features['protocol_max'] = features['protocol_min'] = part[1:] + elif part[0] == 'p': + features['pruning'] = part[1:] + + features.update(ports) + features['hosts'] = {host: ports} + + return cls(host, features, source) diff --git a/torba/server/peers.py b/torba/server/peers.py new file mode 100644 index 000000000..86e8a292a --- /dev/null +++ b/torba/server/peers.py @@ -0,0 +1,510 @@ +# Copyright (c) 2017-2018, Neil Booth +# +# All rights reserved. +# +# See the file "LICENCE" for information about the copyright +# and warranty status of this software. + +'''Peer management.''' + +import asyncio +import random +import socket +import ssl +import time +from collections import defaultdict, Counter + +from aiorpcx import (Connector, RPCSession, SOCKSProxy, + Notification, handler_invocation, + SOCKSError, RPCError, TaskTimeout, TaskGroup, Event, + sleep, run_in_thread, ignore_after, timeout_after) + +from torba.server.peer import Peer +from torba.server.util import class_logger, protocol_tuple + +PEER_GOOD, PEER_STALE, PEER_NEVER, PEER_BAD = range(4) +STALE_SECS = 24 * 3600 +WAKEUP_SECS = 300 + + +class BadPeerError(Exception): + pass + + +def assert_good(message, result, instance): + if not isinstance(result, instance): + raise BadPeerError(f'{message} returned bad result type ' + f'{type(result).__name__}') + + +class PeerSession(RPCSession): + '''An outgoing session to a peer.''' + + async def handle_request(self, request): + # We subscribe so might be unlucky enough to get a notification... 
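
The real-name format parsed by Peer.from_real_name() above round-trips with real_name(). A quick usage example; the port numbers are arbitrary, since DEFAULT_PORTS is left for the application to fill in:

    from torba.server.peer import Peer

    Peer.DEFAULT_PORTS = {'t': 50001, 's': 50002}    # illustrative defaults

    peer = Peer.from_real_name('erbium1.sytes.net v1.0 s t', 'example')
    assert peer.host == 'erbium1.sytes.net'
    assert peer.protocol_max == '1.0'
    assert peer.tcp_port == 50001 and peer.ssl_port == 50002
    assert peer.real_name() == 'erbium1.sytes.net v1.0 s t'
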
+ if (isinstance(request, Notification) and + request.method == 'blockchain.headers.subscribe'): + pass + else: + await handler_invocation(None, request) # Raises + + +class PeerManager(object): + '''Looks after the DB of peer network servers. + + Attempts to maintain a connection with up to 8 peers. + Issues a 'peers.subscribe' RPC to them and tells them our data. + ''' + def __init__(self, env, db): + self.logger = class_logger(__name__, self.__class__.__name__) + # Initialise the Peer class + Peer.DEFAULT_PORTS = env.coin.PEER_DEFAULT_PORTS + self.env = env + self.db = db + + # Our clearnet and Tor Peers, if any + sclass = env.coin.SESSIONCLS + self.myselves = [Peer(ident.host, sclass.server_features(env), 'env') + for ident in env.identities] + self.server_version_args = sclass.server_version_args() + # Peers have one entry per hostname. Once connected, the + # ip_addr property is either None, an onion peer, or the + # IP address that was connected to. Adding a peer will evict + # any other peers with the same host name or IP address. + self.peers = set() + self.permit_onion_peer_time = time.time() + self.proxy = None + self.group = TaskGroup() + + def _my_clearnet_peer(self): + '''Returns the clearnet peer representing this server, if any.''' + clearnet = [peer for peer in self.myselves if not peer.is_tor] + return clearnet[0] if clearnet else None + + def _set_peer_statuses(self): + '''Set peer statuses.''' + cutoff = time.time() - STALE_SECS + for peer in self.peers: + if peer.bad: + peer.status = PEER_BAD + elif peer.last_good > cutoff: + peer.status = PEER_GOOD + elif peer.last_good: + peer.status = PEER_STALE + else: + peer.status = PEER_NEVER + + def _features_to_register(self, peer, remote_peers): + '''If we should register ourselves to the remote peer, which has + reported the given list of known peers, return the clearnet + identity features to register, otherwise None. + ''' + # Announce ourself if not present. Don't if disabled, we + # are a non-public IP address, or to ourselves. + if not self.env.peer_announce or peer in self.myselves: + return None + my = self._my_clearnet_peer() + if not my or not my.is_public: + return None + # Register if no matches, or ports have changed + for peer in my.matches(remote_peers): + if peer.tcp_port == my.tcp_port and peer.ssl_port == my.ssl_port: + return None + return my.features + + def _permit_new_onion_peer(self): + '''Accept a new onion peer only once per random time interval.''' + now = time.time() + if now < self.permit_onion_peer_time: + return False + self.permit_onion_peer_time = now + random.randrange(0, 1200) + return True + + async def _import_peers(self): + '''Import hard-coded peers from a file or the coin defaults.''' + imported_peers = self.myselves.copy() + # Add the hard-coded ones unless only reporting ourself + if self.env.peer_discovery != self.env.PD_SELF: + imported_peers.extend(Peer.from_real_name(real_name, 'coins.py') + for real_name in self.env.coin.PEERS) + await self._note_peers(imported_peers, limit=None) + + async def _detect_proxy(self): + '''Detect a proxy if we don't have one and some time has passed since + the last attempt. + + If found self.proxy is set to a SOCKSProxy instance, otherwise + None. 
+ ''' + host = self.env.tor_proxy_host + if self.env.tor_proxy_port is None: + ports = [9050, 9150, 1080] + else: + ports = [self.env.tor_proxy_port] + while True: + self.logger.info(f'trying to detect proxy on "{host}" ' + f'ports {ports}') + proxy = await SOCKSProxy.auto_detect_host(host, ports, None) + if proxy: + self.proxy = proxy + self.logger.info(f'detected {proxy}') + return + self.logger.info('no proxy detected, will try later') + await sleep(900) + + async def _note_peers(self, peers, limit=2, check_ports=False, + source=None): + '''Add a limited number of peers that are not already present.''' + new_peers = [] + for peer in peers: + if not peer.is_public or (peer.is_tor and not self.proxy): + continue + + matches = peer.matches(self.peers) + if not matches: + new_peers.append(peer) + elif check_ports: + for match in matches: + if match.check_ports(peer): + self.logger.info(f'ports changed for {peer}') + match.retry_event.set() + + if new_peers: + source = source or new_peers[0].source + if limit: + random.shuffle(new_peers) + use_peers = new_peers[:limit] + else: + use_peers = new_peers + for peer in use_peers: + self.logger.info(f'accepted new peer {peer} from {source}') + peer.retry_event = Event() + self.peers.add(peer) + await self.group.spawn(self._monitor_peer(peer)) + + async def _monitor_peer(self, peer): + # Stop monitoring if we were dropped (a duplicate peer) + while peer in self.peers: + if await self._should_drop_peer(peer): + self.peers.discard(peer) + break + # Figure out how long to sleep before retrying. Retry a + # good connection when it is about to turn stale, otherwise + # exponentially back off retries. + if peer.try_count == 0: + pause = STALE_SECS - WAKEUP_SECS * 2 + else: + pause = WAKEUP_SECS * 2 ** peer.try_count + async with ignore_after(pause): + await peer.retry_event.wait() + peer.retry_event.clear() + + async def _should_drop_peer(self, peer): + peer.try_count += 1 + is_good = False + for kind, port in peer.connection_port_pairs(): + peer.last_try = time.time() + + kwargs = {} + if kind == 'SSL': + kwargs['ssl'] = ssl.SSLContext(ssl.PROTOCOL_TLS) + + host = self.env.cs_host(for_rpc=False) + if isinstance(host, list): + host = host[0] + + if self.env.force_proxy or peer.is_tor: + if not self.proxy: + return + kwargs['proxy'] = self.proxy + kwargs['resolve'] = not peer.is_tor + elif host: + # Use our listening Host/IP for outgoing non-proxy + # connections so our peers see the correct source. + kwargs['local_addr'] = (host, None) + + peer_text = f'[{peer}:{port} {kind}]' + try: + async with timeout_after(120 if peer.is_tor else 30): + async with Connector(PeerSession, peer.host, port, + **kwargs) as session: + await self._verify_peer(session, peer) + is_good = True + break + except BadPeerError as e: + self.logger.error(f'{peer_text} marking bad: ({e})') + peer.mark_bad() + break + except RPCError as e: + self.logger.error(f'{peer_text} RPC error: {e.message} ' + f'({e.code})') + except (OSError, SOCKSError, ConnectionError, TaskTimeout) as e: + self.logger.info(f'{peer_text} {e}') + + if is_good: + now = time.time() + elapsed = now - peer.last_try + self.logger.info(f'{peer_text} verified in {elapsed:.1f}s') + peer.try_count = 0 + peer.last_good = now + peer.source = 'peer' + # At most 2 matches if we're a host name, potentially + # several if we're an IP address (several instances + # can share a NAT). 
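
The pause computed in _monitor_peer() above grows exponentially with consecutive failures: a healthy peer is revisited shortly before it would go stale, while a failing one is retried after 10, then 20, then 40 minutes, and so on, until the try limit in _should_drop_peer() forgets it. A tiny standalone illustration with the module's constants:

    STALE_SECS = 24 * 3600
    WAKEUP_SECS = 300

    def retry_pause(try_count):
        if try_count == 0:
            return STALE_SECS - WAKEUP_SECS * 2    # just under a day for a good peer
        return WAKEUP_SECS * 2 ** try_count        # exponential back-off after failures

    assert retry_pause(0) == 85_800
    assert [retry_pause(n) // 60 for n in (1, 2, 3, 4)] == [10, 20, 40, 80]
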
+ matches = peer.matches(self.peers) + for match in matches: + if match.ip_address: + if len(matches) > 1: + self.peers.remove(match) + # Force the peer's monitoring task to exit + match.retry_event.set() + elif peer.host in match.features['hosts']: + match.update_features_from_peer(peer) + else: + # Forget the peer if long-term unreachable + if peer.last_good and not peer.bad: + try_limit = 10 + else: + try_limit = 3 + if peer.try_count >= try_limit: + desc = 'bad' if peer.bad else 'unreachable' + self.logger.info(f'forgetting {desc} peer: {peer}') + return True + return False + + async def _verify_peer(self, session, peer): + if not peer.is_tor: + address = session.peer_address() + if address: + peer.ip_addr = address[0] + + # server.version goes first + message = 'server.version' + result = await session.send_request(message, self.server_version_args) + assert_good(message, result, list) + + # Protocol version 1.1 returns a pair with the version first + if len(result) != 2 or not all(isinstance(x, str) for x in result): + raise BadPeerError(f'bad server.version result: {result}') + server_version, protocol_version = result + peer.server_version = server_version + peer.features['server_version'] = server_version + ptuple = protocol_tuple(protocol_version) + + async with TaskGroup() as g: + await g.spawn(self._send_headers_subscribe(session, peer, ptuple)) + await g.spawn(self._send_server_features(session, peer)) + await g.spawn(self._send_peers_subscribe(session, peer)) + + async def _send_headers_subscribe(self, session, peer, ptuple): + message = 'blockchain.headers.subscribe' + result = await session.send_request(message) + assert_good(message, result, dict) + + our_height = self.db.db_height + if ptuple < (1, 3): + their_height = result.get('block_height') + else: + their_height = result.get('height') + if not isinstance(their_height, int): + raise BadPeerError(f'invalid height {their_height}') + if abs(our_height - their_height) > 5: + raise BadPeerError(f'bad height {their_height:,d} ' + f'(ours: {our_height:,d})') + + # Check prior header too in case of hard fork. 
+ check_height = min(our_height, their_height) + raw_header = await self.db.raw_header(check_height) + if ptuple >= (1, 4): + ours = raw_header.hex() + message = 'blockchain.block.header' + theirs = await session.send_request(message, [check_height]) + assert_good(message, theirs, str) + if ours != theirs: + raise BadPeerError(f'our header {ours} and ' + f'theirs {theirs} differ') + else: + ours = self.env.coin.electrum_header(raw_header, check_height) + ours = ours.get('prev_block_hash') + message = 'blockchain.block.get_header' + theirs = await session.send_request(message, [check_height]) + assert_good(message, theirs, dict) + theirs = theirs.get('prev_block_hash') + if ours != theirs: + raise BadPeerError(f'our header hash {ours} and ' + f'theirs {theirs} differ') + + async def _send_server_features(self, session, peer): + message = 'server.features' + features = await session.send_request(message) + assert_good(message, features, dict) + hosts = [host.lower() for host in features.get('hosts', {})] + if self.env.coin.GENESIS_HASH != features.get('genesis_hash'): + raise BadPeerError('incorrect genesis hash') + elif peer.host.lower() in hosts: + peer.update_features(features) + else: + raise BadPeerError(f'not listed in own hosts list {hosts}') + + async def _send_peers_subscribe(self, session, peer): + message = 'server.peers.subscribe' + raw_peers = await session.send_request(message) + assert_good(message, raw_peers, list) + + # Check the peers list we got from a remote peer. + # Each is expected to be of the form: + # [ip_addr, hostname, ['v1.0', 't51001', 's51002']] + # Call add_peer if the remote doesn't appear to know about us. + try: + real_names = [' '.join([u[1]] + u[2]) for u in raw_peers] + peers = [Peer.from_real_name(real_name, str(peer)) + for real_name in real_names] + except Exception: + raise BadPeerError('bad server.peers.subscribe response') + + await self._note_peers(peers) + features = self._features_to_register(peer, peers) + if not features: + return + self.logger.info(f'registering ourself with {peer}') + # We only care to wait for the response + await session.send_request('server.add_peer', [features]) + + # + # External interface + # + async def discover_peers(self): + '''Perform peer maintenance. This includes + + 1) Forgetting unreachable peers. + 2) Verifying connectivity of new peers. + 3) Retrying old peers at regular intervals. + ''' + if self.env.peer_discovery != self.env.PD_ON: + self.logger.info('peer discovery is disabled') + return + + self.logger.info(f'beginning peer discovery. 
Force use of ' + f'proxy: {self.env.force_proxy}') + forever = Event() + async with self.group as group: + await group.spawn(forever.wait()) + await group.spawn(self._detect_proxy()) + await group.spawn(self._import_peers()) + # Consume tasks as they complete, logging unexpected failures + async for task in group: + if not task.cancelled(): + try: + task.result() + except Exception: + self.logger.exception('task failed unexpectedly') + + def info(self): + '''The number of peers.''' + self._set_peer_statuses() + counter = Counter(peer.status for peer in self.peers) + return { + 'bad': counter[PEER_BAD], + 'good': counter[PEER_GOOD], + 'never': counter[PEER_NEVER], + 'stale': counter[PEER_STALE], + 'total': len(self.peers), + } + + async def add_localRPC_peer(self, real_name): + '''Add a peer passed by the admin over LocalRPC.''' + await self._note_peers([Peer.from_real_name(real_name, 'RPC')]) + + async def on_add_peer(self, features, source_info): + '''Add a peer (but only if the peer resolves to the source).''' + if not source_info: + self.logger.info('ignored add_peer request: no source info') + return False + source = source_info[0] + peers = Peer.peers_from_features(features, source) + if not peers: + self.logger.info('ignored add_peer request: no peers given') + return False + + # Just look at the first peer, require it + peer = peers[0] + host = peer.host + if peer.is_tor: + permit = self._permit_new_onion_peer() + reason = 'rate limiting' + else: + getaddrinfo = asyncio.get_event_loop().getaddrinfo + try: + infos = await getaddrinfo(host, 80, type=socket.SOCK_STREAM) + except socket.gaierror: + permit = False + reason = 'address resolution failure' + else: + permit = any(source == info[-1][0] for info in infos) + reason = 'source-destination mismatch' + + if permit: + self.logger.info(f'accepted add_peer request from {source} ' + f'for {host}') + await self._note_peers([peer], check_ports=True) + else: + self.logger.warning(f'rejected add_peer request from {source} ' + f'for {host} ({reason})') + + return permit + + def on_peers_subscribe(self, is_tor): + '''Returns the server peers as a list of (ip, host, details) tuples. + + We return all peers we've connected to in the last day. + Additionally, if we don't have onion routing, we return a few + hard-coded onion servers. 
+ ''' + cutoff = time.time() - STALE_SECS + recent = [peer for peer in self.peers + if peer.last_good > cutoff and + not peer.bad and peer.is_public] + onion_peers = [] + + # Always report ourselves if valid (even if not public) + peers = set(myself for myself in self.myselves + if myself.last_good > cutoff) + + # Bucket the clearnet peers and select up to two from each + buckets = defaultdict(list) + for peer in recent: + if peer.is_tor: + onion_peers.append(peer) + else: + buckets[peer.bucket()].append(peer) + for bucket_peers in buckets.values(): + random.shuffle(bucket_peers) + peers.update(bucket_peers[:2]) + + # Add up to 20% onion peers (but up to 10 is OK anyway) + random.shuffle(onion_peers) + max_onion = 50 if is_tor else max(10, len(peers) // 4) + + peers.update(onion_peers[:max_onion]) + + return [peer.to_tuple() for peer in peers] + + def proxy_peername(self): + '''Return the peername of the proxy, if there is a proxy, otherwise + None.''' + return self.proxy.peername if self.proxy else None + + def rpc_data(self): + '''Peer data for the peers RPC method.''' + self._set_peer_statuses() + descs = ['good', 'stale', 'never', 'bad'] + + def peer_data(peer): + data = peer.serialize() + data['status'] = descs[peer.status] + return data + + def peer_key(peer): + return (peer.bad, -peer.last_good) + + return [peer_data(peer) for peer in sorted(self.peers, key=peer_key)] diff --git a/torba/server/script.py b/torba/server/script.py new file mode 100644 index 000000000..9ff0047d6 --- /dev/null +++ b/torba/server/script.py @@ -0,0 +1,251 @@ +# Copyright (c) 2016-2017, Neil Booth +# +# All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# and warranty status of this software. 
+ +'''Script-related classes and functions.''' + + +import struct +from collections import namedtuple + +from torba.server.enum import Enumeration +from torba.server.hash import hash160 +from torba.server.util import unpack_le_uint16_from, unpack_le_uint32_from, \ + pack_le_uint16, pack_le_uint32 + + +class ScriptError(Exception): + '''Exception used for script errors.''' + + +OpCodes = Enumeration("Opcodes", [ + ("OP_0", 0), ("OP_PUSHDATA1", 76), + "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", + "OP_RESERVED", + "OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7", "OP_8", + "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16", + "OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", + "OP_ELSE", "OP_ENDIF", "OP_VERIFY", "OP_RETURN", + "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", + "OP_2OVER", "OP_2ROT", "OP_2SWAP", "OP_IFDUP", "OP_DEPTH", "OP_DROP", + "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT", + "OP_SWAP", "OP_TUCK", + "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", + "OP_INVERT", "OP_AND", "OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", + "OP_RESERVED1", "OP_RESERVED2", + "OP_1ADD", "OP_1SUB", "OP_2MUL", "OP_2DIV", "OP_NEGATE", "OP_ABS", + "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV", "OP_MOD", + "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR", "OP_NUMEQUAL", + "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN", "OP_GREATERTHAN", + "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX", + "OP_WITHIN", + "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160", "OP_HASH256", + "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG", + "OP_CHECKMULTISIGVERIFY", + "OP_NOP1", + "OP_CHECKLOCKTIMEVERIFY", "OP_CHECKSEQUENCEVERIFY" +]) + + +# Paranoia to make it hard to create bad scripts +assert OpCodes.OP_DUP == 0x76 +assert OpCodes.OP_HASH160 == 0xa9 +assert OpCodes.OP_EQUAL == 0x87 +assert OpCodes.OP_EQUALVERIFY == 0x88 +assert OpCodes.OP_CHECKSIG == 0xac +assert OpCodes.OP_CHECKMULTISIG == 0xae + + +def _match_ops(ops, pattern): + if len(ops) != len(pattern): + return False + for op, pop in zip(ops, pattern): + if pop != op: + # -1 means 'data push', whose op is an (op, data) tuple + if pop == -1 and isinstance(op, tuple): + continue + return False + + return True + + +class ScriptPubKey(object): + '''A class for handling a tx output script that gives conditions + necessary for spending. + ''' + + TO_ADDRESS_OPS = [OpCodes.OP_DUP, OpCodes.OP_HASH160, -1, + OpCodes.OP_EQUALVERIFY, OpCodes.OP_CHECKSIG] + TO_P2SH_OPS = [OpCodes.OP_HASH160, -1, OpCodes.OP_EQUAL] + TO_PUBKEY_OPS = [-1, OpCodes.OP_CHECKSIG] + + PayToHandlers = namedtuple('PayToHandlers', 'address script_hash pubkey ' + 'unspendable strange') + + @classmethod + def pay_to(cls, handlers, script): + '''Parse a script, invoke the appropriate handler and + return the result. 
+ + One of the following handlers is invoked: + handlers.address(hash160) + handlers.script_hash(hash160) + handlers.pubkey(pubkey) + handlers.unspendable() + handlers.strange(script) + ''' + try: + ops = Script.get_ops(script) + except ScriptError: + return handlers.unspendable() + + match = _match_ops + + if match(ops, cls.TO_ADDRESS_OPS): + return handlers.address(ops[2][-1]) + if match(ops, cls.TO_P2SH_OPS): + return handlers.script_hash(ops[1][-1]) + if match(ops, cls.TO_PUBKEY_OPS): + return handlers.pubkey(ops[0][-1]) + if ops and ops[0] == OpCodes.OP_RETURN: + return handlers.unspendable() + return handlers.strange(script) + + @classmethod + def P2SH_script(cls, hash160): + return (bytes([OpCodes.OP_HASH160]) + + Script.push_data(hash160) + + bytes([OpCodes.OP_EQUAL])) + + @classmethod + def P2PKH_script(cls, hash160): + return (bytes([OpCodes.OP_DUP, OpCodes.OP_HASH160]) + + Script.push_data(hash160) + + bytes([OpCodes.OP_EQUALVERIFY, OpCodes.OP_CHECKSIG])) + + @classmethod + def validate_pubkey(cls, pubkey, req_compressed=False): + if isinstance(pubkey, (bytes, bytearray)): + if len(pubkey) == 33 and pubkey[0] in (2, 3): + return # Compressed + if len(pubkey) == 65 and pubkey[0] == 4: + if not req_compressed: + return + raise PubKeyError('uncompressed pubkeys are invalid') + raise PubKeyError('invalid pubkey {}'.format(pubkey)) + + @classmethod + def pubkey_script(cls, pubkey): + cls.validate_pubkey(pubkey) + return Script.push_data(pubkey) + bytes([OpCodes.OP_CHECKSIG]) + + @classmethod + def multisig_script(cls, m, pubkeys): + '''Returns the script for a pay-to-multisig transaction.''' + n = len(pubkeys) + if not 1 <= m <= n <= 15: + raise ScriptError('{:d} of {:d} multisig script not possible' + .format(m, n)) + for pubkey in pubkeys: + cls.validate_pubkey(pubkey, req_compressed=True) + # See https://bitcoin.org/en/developer-guide + # 2 of 3 is: OP_2 pubkey1 pubkey2 pubkey3 OP_3 OP_CHECKMULTISIG + return (bytes([OP_1 + m - 1]) + + b''.join(cls.push_data(pubkey) for pubkey in pubkeys) + + bytes([OP_1 + n - 1, OP_CHECK_MULTISIG])) + + +class Script(object): + + @classmethod + def get_ops(cls, script): + ops = [] + + # The unpacks or script[n] below throw on truncated scripts + try: + n = 0 + while n < len(script): + op = script[n] + n += 1 + + if op <= OpCodes.OP_PUSHDATA4: + # Raw bytes follow + if op < OpCodes.OP_PUSHDATA1: + dlen = op + elif op == OpCodes.OP_PUSHDATA1: + dlen = script[n] + n += 1 + elif op == OpCodes.OP_PUSHDATA2: + dlen, = unpack_le_uint16_from(script[n: n + 2]) + n += 2 + else: + dlen, = unpack_le_uint32_from(script[n: n + 4]) + n += 4 + if n + dlen > len(script): + raise IndexError + op = (op, script[n:n + dlen]) + n += dlen + + ops.append(op) + except Exception: + # Truncated script; e.g. 
tx_hash + # ebc9fa1196a59e192352d76c0f6e73167046b9d37b8302b6bb6968dfd279b767 + raise ScriptError('truncated script') + + return ops + + @classmethod + def push_data(cls, data): + '''Returns the opcodes to push the data on the stack.''' + assert isinstance(data, (bytes, bytearray)) + + n = len(data) + if n < OpCodes.OP_PUSHDATA1: + return bytes([n]) + data + if n < 256: + return bytes([OpCodes.OP_PUSHDATA1, n]) + data + if n < 65536: + return bytes([OpCodes.OP_PUSHDATA2]) + pack_le_uint16(n) + data + return bytes([OpCodes.OP_PUSHDATA4]) + pack_le_uint32(n) + data + + @classmethod + def opcode_name(cls, opcode): + if OpCodes.OP_0 < opcode < OpCodes.OP_PUSHDATA1: + return 'OP_{:d}'.format(opcode) + try: + return OpCodes.whatis(opcode) + except KeyError: + return 'OP_UNKNOWN:{:d}'.format(opcode) + + @classmethod + def dump(cls, script): + opcodes, datas = cls.get_ops(script) + for opcode, data in zip(opcodes, datas): + name = cls.opcode_name(opcode) + if data is None: + print(name) + else: + print('{} {} ({:d} bytes)' + .format(name, data.hex(), len(data))) diff --git a/torba/server/server.py b/torba/server/server.py new file mode 100644 index 000000000..5b22e6b7e --- /dev/null +++ b/torba/server/server.py @@ -0,0 +1,129 @@ +import signal +import time +import logging +from functools import partial +import asyncio + +import torba +from torba.server.db import DB +from torba.server.mempool import MemPool, MemPoolAPI +from torba.server.session import SessionManager + + +class Notifications: + # hashX notifications come from two sources: new blocks and + # mempool refreshes. + # + # A user with a pending transaction is notified after the block it + # gets in is processed. Block processing can take an extended + # time, and the prefetcher might poll the daemon after the mempool + # code in any case. In such cases the transaction will not be in + # the mempool after the mempool refresh. We want to avoid + # notifying clients twice - for the mempool refresh and when the + # block is done. This object handles that logic by deferring + # notifications appropriately. 
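+    #
+    # A rough usage sketch (illustrative only, not part of the original
+    # patch; hashX_a and hashX_b are made-up touched sets) of how the
+    # deferral plays out when a block is processed before the matching
+    # mempool refresh arrives:
+    #
+    #   n = Notifications()
+    #   await n.start(499, notify_func)     # records height 499, initial notify
+    #   await n.on_block({hashX_a}, 500)    # deferred: no mempool data for 500 yet
+    #   await n.on_mempool({hashX_b}, 500)  # heights agree -> one combined call:
+    #                                       # notify_func(500, {hashX_a, hashX_b})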
+ + def __init__(self): + self._touched_mp = {} + self._touched_bp = {} + self._highest_block = -1 + + async def _maybe_notify(self): + tmp, tbp = self._touched_mp, self._touched_bp + common = set(tmp).intersection(tbp) + if common: + height = max(common) + elif tmp and max(tmp) == self._highest_block: + height = self._highest_block + else: + # Either we are processing a block and waiting for it to + # come in, or we have not yet had a mempool update for the + # new block height + return + touched = tmp.pop(height) + for old in [h for h in tmp if h <= height]: + del tmp[old] + for old in [h for h in tbp if h <= height]: + touched.update(tbp.pop(old)) + await self.notify(height, touched) + + async def notify(self, height, touched): + pass + + async def start(self, height, notify_func): + self._highest_block = height + self.notify = notify_func + await self.notify(height, set()) + + async def on_mempool(self, touched, height): + self._touched_mp[height] = touched + await self._maybe_notify() + + async def on_block(self, touched, height): + self._touched_bp[height] = touched + self._highest_block = height + await self._maybe_notify() + + +class Server: + + def __init__(self, env): + self.env = env + self.log = logging.getLogger(__name__).getChild(self.__class__.__name__) + self.shutdown_event = asyncio.Event() + self.cancellable_tasks = [] + + async def start(self): + env = self.env + min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings() + self.log.info(f'software version: {torba.__version__}') + self.log.info(f'supported protocol versions: {min_str}-{max_str}') + self.log.info(f'event loop policy: {env.loop_policy}') + self.log.info(f'reorg limit is {env.reorg_limit:,d} blocks') + + notifications = Notifications() + Daemon = env.coin.DAEMON + BlockProcessor = env.coin.BLOCK_PROCESSOR + + daemon = Daemon(env.coin, env.daemon_url) + db = DB(env) + bp = BlockProcessor(env, db, daemon, notifications) + + # Set notifications up to implement the MemPoolAPI + notifications.height = daemon.height + notifications.cached_height = daemon.cached_height + notifications.mempool_hashes = daemon.mempool_hashes + notifications.raw_transactions = daemon.getrawtransactions + notifications.lookup_utxos = db.lookup_utxos + MemPoolAPI.register(Notifications) + mempool = MemPool(env.coin, notifications) + + session_mgr = SessionManager( + env, db, bp, daemon, mempool, self.shutdown_event + ) + + await daemon.height() + + def _start_cancellable(run, *args): + _flag = asyncio.Event() + self.cancellable_tasks.append(asyncio.ensure_future(run(*args, _flag))) + return _flag.wait() + + await _start_cancellable(bp.fetch_and_process_blocks) + await db.populate_header_merkle_cache() + await _start_cancellable(mempool.keep_synchronized) + await _start_cancellable(session_mgr.serve, notifications) + + def stop(self): + for task in reversed(self.cancellable_tasks): + task.cancel() + + def run(self): + loop = asyncio.get_event_loop() + try: + loop.add_signal_handler(signal.SIGINT, self.stop) + loop.add_signal_handler(signal.SIGTERM, self.stop) + loop.run_until_complete(self.start()) + loop.run_forever() + finally: + loop.run_until_complete(loop.shutdown_asyncgens()) diff --git a/torba/server/session.py b/torba/server/session.py new file mode 100644 index 000000000..787302075 --- /dev/null +++ b/torba/server/session.py @@ -0,0 +1,1436 @@ +# Copyright (c) 2016-2018, Neil Booth +# +# All rights reserved. +# +# See the file "LICENCE" for information about the copyright +# and warranty status of this software. 
+ +'''Classes for local RPC server and remote client TCP/SSL servers.''' + +import asyncio +import codecs +import datetime +import itertools +import json +import os +import pylru +import ssl +import time +from collections import defaultdict +from functools import partial + +from aiorpcx import ( + RPCSession, JSONRPCAutoDetect, JSONRPCConnection, + TaskGroup, handler_invocation, RPCError, Request, ignore_after, sleep, + Event +) + +import torba +import torba.server.text as text +import torba.server.util as util +from torba.server.hash import (sha256, hash_to_hex_str, hex_str_to_hash, + HASHX_LEN, Base58Error) +from torba.server.peer import Peer +from torba.server.daemon import DaemonError +from torba.server.peers import PeerManager + + +BAD_REQUEST = 1 +DAEMON_ERROR = 2 + + +def scripthash_to_hashX(scripthash): + try: + bin_hash = hex_str_to_hash(scripthash) + if len(bin_hash) == 32: + return bin_hash[:HASHX_LEN] + except Exception: + pass + raise RPCError(BAD_REQUEST, f'{scripthash} is not a valid script hash') + + +def non_negative_integer(value): + '''Return param value it is or can be converted to a non-negative + integer, otherwise raise an RPCError.''' + try: + value = int(value) + if value >= 0: + return value + except ValueError: + pass + raise RPCError(BAD_REQUEST, + f'{value} should be a non-negative integer') + + +def assert_boolean(value): + '''Return param value it is boolean otherwise raise an RPCError.''' + if value in (False, True): + return value + raise RPCError(BAD_REQUEST, f'{value} should be a boolean value') + + +def assert_tx_hash(value): + '''Raise an RPCError if the value is not a valid transaction + hash.''' + try: + if len(util.hex_to_bytes(value)) == 32: + return + except Exception: + pass + raise RPCError(BAD_REQUEST, f'{value} should be a transaction hash') + + +class Semaphores(object): + '''For aiorpcX's semaphore handling.''' + + def __init__(self, semaphores): + self.semaphores = semaphores + self.acquired = [] + + async def __aenter__(self): + for semaphore in self.semaphores: + await semaphore.acquire() + self.acquired.append(semaphore) + + async def __aexit__(self, exc_type, exc_value, traceback): + for semaphore in self.acquired: + semaphore.release() + + +class SessionGroup(object): + + def __init__(self, gid): + self.gid = gid + # Concurrency per group + self.semaphore = asyncio.Semaphore(20) + + +class SessionManager(object): + '''Holds global state about all sessions.''' + + def __init__(self, env, db, bp, daemon, mempool, shutdown_event): + env.max_send = max(350000, env.max_send) + self.env = env + self.db = db + self.bp = bp + self.daemon = daemon + self.mempool = mempool + self.peer_mgr = PeerManager(env, db) + self.shutdown_event = shutdown_event + self.logger = util.class_logger(__name__, self.__class__.__name__) + self.servers = {} + self.sessions = set() + self.max_subs = env.max_subs + self.cur_group = SessionGroup(0) + self.txs_sent = 0 + self.start_time = time.time() + self.history_cache = pylru.lrucache(256) + self.notified_height = None + # Cache some idea of room to avoid recounting on each subscription + self.subs_room = 0 + # Masternode stuff only for such coins + if issubclass(env.coin.SESSIONCLS, DashElectrumX): + self.mn_cache_height = 0 + self.mn_cache = [] + + self.session_event = Event() + + # Set up the RPC request handlers + cmds = ('add_peer daemon_url disconnect getinfo groups log peers ' + 'query reorg sessions stop'.split()) + LocalRPC.request_handlers = {cmd: getattr(self, 'rpc_' + cmd) + for cmd in cmds} + + async def 
_start_server(self, kind, *args, **kw_args): + loop = asyncio.get_event_loop() + if kind == 'RPC': + protocol_class = LocalRPC + else: + protocol_class = self.env.coin.SESSIONCLS + protocol_factory = partial(protocol_class, self, self.db, + self.mempool, self.peer_mgr, kind) + server = loop.create_server(protocol_factory, *args, **kw_args) + + host, port = args[:2] + try: + self.servers[kind] = await server + except OSError as e: # don't suppress CancelledError + self.logger.error(f'{kind} server failed to listen on {host}:' + f'{port:d} :{e!r}') + else: + self.logger.info(f'{kind} server listening on {host}:{port:d}') + + async def _start_external_servers(self): + '''Start listening on TCP and SSL ports, but only if the respective + port was given in the environment. + ''' + env = self.env + host = env.cs_host(for_rpc=False) + if env.tcp_port is not None: + await self._start_server('TCP', host, env.tcp_port) + if env.ssl_port is not None: + sslc = ssl.SSLContext(ssl.PROTOCOL_TLS) + sslc.load_cert_chain(env.ssl_certfile, keyfile=env.ssl_keyfile) + await self._start_server('SSL', host, env.ssl_port, ssl=sslc) + + async def _close_servers(self, kinds): + '''Close the servers of the given kinds (TCP etc.).''' + if kinds: + self.logger.info('closing down {} listening servers' + .format(', '.join(kinds))) + for kind in kinds: + server = self.servers.pop(kind, None) + if server: + server.close() + await server.wait_closed() + + async def _manage_servers(self): + paused = False + max_sessions = self.env.max_sessions + low_watermark = max_sessions * 19 // 20 + while True: + await self.session_event.wait() + self.session_event.clear() + if not paused and len(self.sessions) >= max_sessions: + self.logger.info(f'maximum sessions {max_sessions:,d} ' + f'reached, stopping new connections until ' + f'count drops to {low_watermark:,d}') + await self._close_servers(['TCP', 'SSL']) + paused = True + # Start listening for incoming connections if paused and + # session count has fallen + if paused and len(self.sessions) <= low_watermark: + self.logger.info('resuming listening for incoming connections') + await self._start_external_servers() + paused = False + + async def _log_sessions(self): + '''Periodically log sessions.''' + log_interval = self.env.log_sessions + if log_interval: + while True: + await sleep(log_interval) + data = self._session_data(for_log=True) + for line in text.sessions_lines(data): + self.logger.info(line) + self.logger.info(json.dumps(self._get_info())) + + def _group_map(self): + group_map = defaultdict(list) + for session in self.sessions: + group_map[session.group].append(session) + return group_map + + def _sub_count(self): + return sum(s.sub_count() for s in self.sessions) + + def _lookup_session(self, session_id): + try: + session_id = int(session_id) + except Exception: + pass + else: + for session in self.sessions: + if session.session_id == session_id: + return session + return None + + async def _for_each_session(self, session_ids, operation): + if not isinstance(session_ids, list): + raise RPCError(BAD_REQUEST, 'expected a list of session IDs') + + result = [] + for session_id in session_ids: + session = self._lookup_session(session_id) + if session: + result.append(await operation(session)) + else: + result.append(f'unknown session: {session_id}') + return result + + async def _clear_stale_sessions(self): + '''Cut off sessions that haven't done anything for 10 minutes.''' + while True: + await sleep(60) + stale_cutoff = time.time() - self.env.session_timeout + 
stale_sessions = [session for session in self.sessions + if session.last_recv < stale_cutoff] + if stale_sessions: + text = ', '.join(str(session.session_id) + for session in stale_sessions) + self.logger.info(f'closing stale connections {text}') + # Give the sockets some time to close gracefully + async with TaskGroup() as group: + for session in stale_sessions: + await group.spawn(session.close()) + + # Consolidate small groups + bw_limit = self.env.bandwidth_limit + group_map = self._group_map() + groups = [group for group, sessions in group_map.items() + if len(sessions) <= 5 and + sum(s.bw_charge for s in sessions) < bw_limit] + if len(groups) > 1: + new_group = groups[-1] + for group in groups: + for session in group_map[group]: + session.group = new_group + + def _get_info(self): + '''A summary of server state.''' + group_map = self._group_map() + return { + 'closing': len([s for s in self.sessions if s.is_closing()]), + 'daemon': self.daemon.logged_url(), + 'daemon_height': self.daemon.cached_height(), + 'db_height': self.db.db_height, + 'errors': sum(s.errors for s in self.sessions), + 'groups': len(group_map), + 'logged': len([s for s in self.sessions if s.log_me]), + 'paused': sum(not s.can_send.is_set() for s in self.sessions), + 'pid': os.getpid(), + 'peers': self.peer_mgr.info(), + 'requests': sum(s.count_pending_items() for s in self.sessions), + 'sessions': self.session_count(), + 'subs': self._sub_count(), + 'txs_sent': self.txs_sent, + 'uptime': util.formatted_time(time.time() - self.start_time), + 'version': torba.__version__, + } + + def _session_data(self, for_log): + '''Returned to the RPC 'sessions' call.''' + now = time.time() + sessions = sorted(self.sessions, key=lambda s: s.start_time) + return [(session.session_id, + session.flags(), + session.peer_address_str(for_log=for_log), + session.client, + session.protocol_version_string(), + session.count_pending_items(), + session.txs_sent, + session.sub_count(), + session.recv_count, session.recv_size, + session.send_count, session.send_size, + now - session.start_time) + for session in sessions] + + def _group_data(self): + '''Returned to the RPC 'groups' call.''' + result = [] + group_map = self._group_map() + for group, sessions in group_map.items(): + result.append([group.gid, + len(sessions), + sum(s.bw_charge for s in sessions), + sum(s.count_pending_items() for s in sessions), + sum(s.txs_sent for s in sessions), + sum(s.sub_count() for s in sessions), + sum(s.recv_count for s in sessions), + sum(s.recv_size for s in sessions), + sum(s.send_count for s in sessions), + sum(s.send_size for s in sessions), + ]) + return result + + async def _electrum_and_raw_headers(self, height): + raw_header = await self.raw_header(height) + electrum_header = self.env.coin.electrum_header(raw_header, height) + return electrum_header, raw_header + + async def _refresh_hsub_results(self, height): + '''Refresh the cached header subscription responses to be for height, + and record that as notified_height. + ''' + # Paranoia: a reorg could race and leave db_height lower + height = min(height, self.db.db_height) + electrum, raw = await self._electrum_and_raw_headers(height) + self.hsub_results = (electrum, {'hex': raw.hex(), 'height': height}) + self.notified_height = height + + # --- LocalRPC command handlers + + async def rpc_add_peer(self, real_name): + '''Add a peer. 
+ + real_name: "bch.electrumx.cash t50001 s50002" for example + ''' + await self.peer_mgr.add_localRPC_peer(real_name) + return "peer '{}' added".format(real_name) + + async def rpc_disconnect(self, session_ids): + '''Disconnect sesssions. + + session_ids: array of session IDs + ''' + async def close(session): + '''Close the session's transport.''' + await session.close(force_after=2) + return f'disconnected {session.session_id}' + + return await self._for_each_session(session_ids, close) + + async def rpc_log(self, session_ids): + '''Toggle logging of sesssions. + + session_ids: array of session IDs + ''' + async def toggle_logging(session): + '''Toggle logging of the session.''' + session.toggle_logging() + return f'log {session.session_id}: {session.log_me}' + + return await self._for_each_session(session_ids, toggle_logging) + + async def rpc_daemon_url(self, daemon_url): + '''Replace the daemon URL.''' + daemon_url = daemon_url or self.env.daemon_url + try: + self.daemon.set_url(daemon_url) + except Exception as e: + raise RPCError(BAD_REQUEST, f'an error occured: {e!r}') + return f'now using daemon at {self.daemon.logged_url()}' + + async def rpc_stop(self): + '''Shut down the server cleanly.''' + self.shutdown_event.set() + return 'stopping' + + async def rpc_getinfo(self): + '''Return summary information about the server process.''' + return self._get_info() + + async def rpc_groups(self): + '''Return statistics about the session groups.''' + return self._group_data() + + async def rpc_peers(self): + '''Return a list of data about server peers.''' + return self.peer_mgr.rpc_data() + + async def rpc_query(self, items, limit): + '''Return a list of data about server peers.''' + coin = self.env.coin + db = self.db + lines = [] + + def arg_to_hashX(arg): + try: + script = bytes.fromhex(arg) + lines.append(f'Script: {arg}') + return coin.hashX_from_script(script) + except ValueError: + pass + + try: + hashX = coin.address_to_hashX(arg) + except Base58Error as e: + lines.append(e.args[0]) + return None + lines.append(f'Address: {arg}') + return hashX + + for arg in items: + hashX = arg_to_hashX(arg) + if not hashX: + continue + n = None + history = await db.limited_history(hashX, limit=limit) + for n, (tx_hash, height) in enumerate(history): + lines.append(f'History #{n:,d}: height {height:,d} ' + f'tx_hash {hash_to_hex_str(tx_hash)}') + if n is None: + lines.append('No history found') + n = None + utxos = await db.all_utxos(hashX) + for n, utxo in enumerate(utxos, start=1): + lines.append(f'UTXO #{n:,d}: tx_hash ' + f'{hash_to_hex_str(utxo.tx_hash)} ' + f'tx_pos {utxo.tx_pos:,d} height ' + f'{utxo.height:,d} value {utxo.value:,d}') + if n == limit: + break + if n is None: + lines.append('No UTXOs found') + + balance = sum(utxo.value for utxo in utxos) + lines.append(f'Balance: {coin.decimal_value(balance):,f} ' + f'{coin.SHORTNAME}') + + return lines + + async def rpc_sessions(self): + '''Return statistics about connected sessions.''' + return self._session_data(for_log=False) + + async def rpc_reorg(self, count): + '''Force a reorg of the given number of blocks. + + count: number of blocks to reorg + ''' + count = non_negative_integer(count) + if not self.bp.force_chain_reorg(count): + raise RPCError(BAD_REQUEST, 'still catching up with daemon') + return f'scheduled a reorg of {count:,d} blocks' + + # --- External Interface + + async def serve(self, notifications, server_listening_event): + '''Start the RPC server if enabled. 
When the event is triggered, + start TCP and SSL servers.''' + try: + if self.env.rpc_port is not None: + await self._start_server('RPC', self.env.cs_host(for_rpc=True), + self.env.rpc_port) + self.logger.info(f'max session count: {self.env.max_sessions:,d}') + self.logger.info(f'session timeout: ' + f'{self.env.session_timeout:,d} seconds') + self.logger.info('session bandwidth limit {:,d} bytes' + .format(self.env.bandwidth_limit)) + self.logger.info('max response size {:,d} bytes' + .format(self.env.max_send)) + self.logger.info('max subscriptions across all sessions: {:,d}' + .format(self.max_subs)) + self.logger.info('max subscriptions per session: {:,d}' + .format(self.env.max_session_subs)) + if self.env.drop_client is not None: + self.logger.info('drop clients matching: {}' + .format(self.env.drop_client.pattern)) + # Start notifications; initialize hsub_results + await notifications.start(self.db.db_height, self._notify_sessions) + await self._start_external_servers() + server_listening_event.set() + # Peer discovery should start after the external servers + # because we connect to ourself + async with TaskGroup() as group: + await group.spawn(self.peer_mgr.discover_peers()) + await group.spawn(self._clear_stale_sessions()) + await group.spawn(self._log_sessions()) + await group.spawn(self._manage_servers()) + finally: + # Close servers and sessions + await self._close_servers(list(self.servers.keys())) + async with TaskGroup() as group: + for session in list(self.sessions): + await group.spawn(session.close(force_after=1)) + + def session_count(self): + '''The number of connections that we've sent something to.''' + return len(self.sessions) + + async def daemon_request(self, method, *args): + '''Catch a DaemonError and convert it to an RPCError.''' + try: + return await getattr(self.daemon, method)(*args) + except DaemonError as e: + raise RPCError(DAEMON_ERROR, f'daemon error: {e!r}') from None + + async def raw_header(self, height): + '''Return the binary header at the given height.''' + try: + return await self.db.raw_header(height) + except IndexError: + raise RPCError(BAD_REQUEST, f'height {height:,d} ' + 'out of range') from None + + async def electrum_header(self, height): + '''Return the deserialized header at the given height.''' + electrum_header, _ = await self._electrum_and_raw_headers(height) + return electrum_header + + async def broadcast_transaction(self, raw_tx): + hex_hash = await self.daemon.broadcast_transaction(raw_tx) + self.txs_sent += 1 + return hex_hash + + async def limited_history(self, hashX): + '''A caching layer.''' + hc = self.history_cache + if hashX not in hc: + # History DoS limit. Each element of history is about 99 + # bytes when encoded as JSON. This limits resource usage + # on bloated history requests, and uses a smaller divisor + # so large requests are logged before refusing them. 
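+            # For example, max_send is floored at 350,000 bytes in
+            # SessionManager.__init__, so this limit is always at least
+            # 350000 // 97 = 3608 history items per request.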
+ limit = self.env.max_send // 97 + hc[hashX] = await self.db.limited_history(hashX, limit=limit) + return hc[hashX] + + async def _notify_sessions(self, height, touched): + '''Notify sessions about height changes and touched addresses.''' + height_changed = height != self.notified_height + if height_changed: + await self._refresh_hsub_results(height) + # Invalidate our history cache for touched hashXs + hc = self.history_cache + for hashX in set(hc).intersection(touched): + del hc[hashX] + + async with TaskGroup() as group: + for session in self.sessions: + await group.spawn(session.notify(touched, height_changed)) + + def add_session(self, session): + self.sessions.add(session) + self.session_event.set() + gid = int(session.start_time - self.start_time) // 900 + if self.cur_group.gid != gid: + self.cur_group = SessionGroup(gid) + return self.cur_group + + def remove_session(self, session): + '''Remove a session from our sessions list if there.''' + self.sessions.remove(session) + self.session_event.set() + + def new_subscription(self): + if self.subs_room <= 0: + self.subs_room = self.max_subs - self._sub_count() + if self.subs_room <= 0: + raise RPCError(BAD_REQUEST, f'server subscription limit ' + f'{self.max_subs:,d} reached') + self.subs_room -= 1 + + +class SessionBase(RPCSession): + '''Base class of ElectrumX JSON sessions. + + Each session runs its tasks in asynchronous parallelism with other + sessions. + ''' + + MAX_CHUNK_SIZE = 2016 + session_counter = itertools.count() + + def __init__(self, session_mgr, db, mempool, peer_mgr, kind): + connection = JSONRPCConnection(JSONRPCAutoDetect) + super().__init__(connection=connection) + self.logger = util.class_logger(__name__, self.__class__.__name__) + self.session_mgr = session_mgr + self.db = db + self.mempool = mempool + self.peer_mgr = peer_mgr + self.kind = kind # 'RPC', 'TCP' etc. 
+ self.env = session_mgr.env + self.coin = self.env.coin + self.client = 'unknown' + self.anon_logs = self.env.anon_logs + self.txs_sent = 0 + self.log_me = False + self.bw_limit = self.env.bandwidth_limit + self.daemon_request = self.session_mgr.daemon_request + # Hijack the connection so we can log messages + self._receive_message_orig = self.connection.receive_message + self.connection.receive_message = self.receive_message + + async def notify(self, touched, height_changed): + pass + + def peer_address_str(self, *, for_log=True): + '''Returns the peer's IP address and port as a human-readable + string, respecting anon logs if the output is for a log.''' + if for_log and self.anon_logs: + return 'xx.xx.xx.xx:xx' + return super().peer_address_str() + + def receive_message(self, message): + if self.log_me: + self.logger.info(f'processing {message}') + return self._receive_message_orig(message) + + def toggle_logging(self): + self.log_me = not self.log_me + + def flags(self): + '''Status flags.''' + status = self.kind[0] + if self.is_closing(): + status += 'C' + if self.log_me: + status += 'L' + status += str(self.concurrency.max_concurrent) + return status + + def connection_made(self, transport): + '''Handle an incoming client connection.''' + super().connection_made(transport) + self.session_id = next(self.session_counter) + context = {'conn_id': f'{self.session_id}'} + self.logger = util.ConnectionLogger(self.logger, context) + self.group = self.session_mgr.add_session(self) + self.logger.info(f'{self.kind} {self.peer_address_str()}, ' + f'{self.session_mgr.session_count():,d} total') + + def connection_lost(self, exc): + '''Handle client disconnection.''' + super().connection_lost(exc) + self.session_mgr.remove_session(self) + msg = '' + if not self.can_send.is_set(): + msg += ' whilst paused' + if self.concurrency.max_concurrent != self.max_concurrent: + msg += ' whilst throttled' + if self.send_size >= 1024*1024: + msg += ('. Sent {:,d} bytes in {:,d} messages' + .format(self.send_size, self.send_count)) + if msg: + msg = 'disconnected' + msg + self.logger.info(msg) + + def count_pending_items(self): + return len(self.connection.pending_requests()) + + def semaphore(self): + return Semaphores([self.concurrency.semaphore, self.group.semaphore]) + + def sub_count(self): + return 0 + + async def handle_request(self, request): + '''Handle an incoming request. ElectrumX doesn't receive + notifications from client sessions. 
+ ''' + if isinstance(request, Request): + handler = self.request_handlers.get(request.method) + else: + handler = None + coro = handler_invocation(handler, request)() + return await coro + + +class ElectrumX(SessionBase): + '''A TCP server that handles incoming Electrum connections.''' + + PROTOCOL_MIN = (1, 1) + PROTOCOL_MAX = (1, 4) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.subscribe_headers = False + self.subscribe_headers_raw = False + self.connection.max_response_size = self.env.max_send + self.max_subs = self.env.max_session_subs + self.hashX_subs = {} + self.sv_seen = False + self.mempool_statuses = {} + self.set_request_handlers(self.PROTOCOL_MIN) + + @classmethod + def protocol_min_max_strings(cls): + return [util.version_string(ver) + for ver in (cls.PROTOCOL_MIN, cls.PROTOCOL_MAX)] + + @classmethod + def server_features(cls, env): + '''Return the server features dictionary.''' + min_str, max_str = cls.protocol_min_max_strings() + return { + 'hosts': env.hosts_dict(), + 'pruning': None, + 'server_version': torba.__version__, + 'protocol_min': min_str, + 'protocol_max': max_str, + 'genesis_hash': env.coin.GENESIS_HASH, + 'hash_function': 'sha256', + } + + async def server_features_async(self): + return self.server_features(self.env) + + @classmethod + def server_version_args(cls): + '''The arguments to a server.version RPC call to a peer.''' + return [torba.__version__, cls.protocol_min_max_strings()] + + def protocol_version_string(self): + return util.version_string(self.protocol_tuple) + + def sub_count(self): + return len(self.hashX_subs) + + async def notify(self, touched, height_changed): + '''Notify the client about changes to touched addresses (from mempool + updates or new blocks) and height. + ''' + if height_changed and self.subscribe_headers: + args = (await self.subscribe_headers_result(), ) + await self.send_notification('blockchain.headers.subscribe', args) + + touched = touched.intersection(self.hashX_subs) + if touched or (height_changed and self.mempool_statuses): + changed = {} + + for hashX in touched: + alias = self.hashX_subs[hashX] + status = await self.address_status(hashX) + changed[alias] = status + + # Check mempool hashXs - the status is a function of the + # confirmed state of other transactions. Note: we cannot + # iterate over mempool_statuses as it changes size. 
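+            # (address_status() awaited below can itself pop entries from
+            # mempool_statuses, which is why we iterate over a snapshot.)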
+ for hashX in tuple(self.mempool_statuses): + # Items can be evicted whilst await-ing status; False + # ensures such hashXs are notified + old_status = self.mempool_statuses.get(hashX, False) + status = await self.address_status(hashX) + if status != old_status: + alias = self.hashX_subs[hashX] + changed[alias] = status + + for alias, status in changed.items(): + if len(alias) == 64: + method = 'blockchain.scripthash.subscribe' + else: + method = 'blockchain.address.subscribe' + await self.send_notification(method, (alias, status)) + + if changed: + es = '' if len(changed) == 1 else 'es' + self.logger.info(f'notified of {len(changed):,d} address{es}') + + async def subscribe_headers_result(self): + '''The result of a header subscription or notification.''' + return self.session_mgr.hsub_results[self.subscribe_headers_raw] + + async def _headers_subscribe(self, raw): + '''Subscribe to get headers of new blocks.''' + self.subscribe_headers_raw = assert_boolean(raw) + self.subscribe_headers = True + return await self.subscribe_headers_result() + + async def headers_subscribe(self): + '''Subscribe to get raw headers of new blocks.''' + return await self._headers_subscribe(True) + + async def headers_subscribe_True(self, raw=True): + '''Subscribe to get headers of new blocks.''' + return await self._headers_subscribe(raw) + + async def headers_subscribe_False(self, raw=False): + '''Subscribe to get headers of new blocks.''' + return await self._headers_subscribe(raw) + + async def add_peer(self, features): + '''Add a peer (but only if the peer resolves to the source).''' + return await self.peer_mgr.on_add_peer(features, self.peer_address()) + + async def peers_subscribe(self): + '''Return the server peers as a list of (ip, host, details) tuples.''' + return self.peer_mgr.on_peers_subscribe(self.is_tor()) + + async def address_status(self, hashX): + '''Returns an address status. + + Status is a hex string, but must be None if there is no history. + ''' + # Note history is ordered and mempool unordered in electrum-server + # For mempool, height is -1 if it has unconfirmed inputs, otherwise 0 + db_history = await self.session_mgr.limited_history(hashX) + mempool = await self.mempool.transaction_summaries(hashX) + + status = ''.join(f'{hash_to_hex_str(tx_hash)}:' + f'{height:d}:' + for tx_hash, height in db_history) + status += ''.join(f'{hash_to_hex_str(tx.hash)}:' + f'{-tx.has_unconfirmed_inputs:d}:' + for tx in mempool) + if status: + status = sha256(status.encode()).hex() + else: + status = None + + if mempool: + self.mempool_statuses[hashX] = status + else: + self.mempool_statuses.pop(hashX, None) + + return status + + async def hashX_listunspent(self, hashX): + '''Return the list of UTXOs of a script hash, including mempool + effects.''' + utxos = await self.db.all_utxos(hashX) + utxos = sorted(utxos) + utxos.extend(await self.mempool.unordered_UTXOs(hashX)) + spends = await self.mempool.potential_spends(hashX) + + return [{'tx_hash': hash_to_hex_str(utxo.tx_hash), + 'tx_pos': utxo.tx_pos, + 'height': utxo.height, 'value': utxo.value} + for utxo in utxos + if (utxo.tx_hash, utxo.tx_pos) not in spends] + + async def hashX_subscribe(self, hashX, alias): + # First check our limit. 
+ if len(self.hashX_subs) >= self.max_subs: + raise RPCError(BAD_REQUEST, 'your address subscription limit ' + f'{self.max_subs:,d} reached') + + # Now let the session manager check its limit + self.session_mgr.new_subscription() + self.hashX_subs[hashX] = alias + return await self.address_status(hashX) + + def address_to_hashX(self, address): + try: + return self.coin.address_to_hashX(address) + except Exception: + pass + raise RPCError(BAD_REQUEST, f'{address} is not a valid address') + + async def address_get_balance(self, address): + '''Return the confirmed and unconfirmed balance of an address.''' + hashX = self.address_to_hashX(address) + return await self.get_balance(hashX) + + async def address_get_history(self, address): + '''Return the confirmed and unconfirmed history of an address.''' + hashX = self.address_to_hashX(address) + return await self.confirmed_and_unconfirmed_history(hashX) + + async def address_get_mempool(self, address): + '''Return the mempool transactions touching an address.''' + hashX = self.address_to_hashX(address) + return await self.unconfirmed_history(hashX) + + async def address_listunspent(self, address): + '''Return the list of UTXOs of an address.''' + hashX = self.address_to_hashX(address) + return await self.hashX_listunspent(hashX) + + async def address_subscribe(self, address): + '''Subscribe to an address. + + address: the address to subscribe to''' + hashX = self.address_to_hashX(address) + return await self.hashX_subscribe(hashX, address) + + async def get_balance(self, hashX): + utxos = await self.db.all_utxos(hashX) + confirmed = sum(utxo.value for utxo in utxos) + unconfirmed = await self.mempool.balance_delta(hashX) + return {'confirmed': confirmed, 'unconfirmed': unconfirmed} + + async def scripthash_get_balance(self, scripthash): + '''Return the confirmed and unconfirmed balance of a scripthash.''' + hashX = scripthash_to_hashX(scripthash) + return await self.get_balance(hashX) + + async def unconfirmed_history(self, hashX): + # Note unconfirmed history is unordered in electrum-server + # height is -1 if it has unconfirmed inputs, otherwise 0 + return [{'tx_hash': hash_to_hex_str(tx.hash), + 'height': -tx.has_unconfirmed_inputs, + 'fee': tx.fee} + for tx in await self.mempool.transaction_summaries(hashX)] + + async def confirmed_and_unconfirmed_history(self, hashX): + # Note history is ordered but unconfirmed is unordered in e-s + history = await self.session_mgr.limited_history(hashX) + conf = [{'tx_hash': hash_to_hex_str(tx_hash), 'height': height} + for tx_hash, height in history] + return conf + await self.unconfirmed_history(hashX) + + async def scripthash_get_history(self, scripthash): + '''Return the confirmed and unconfirmed history of a scripthash.''' + hashX = scripthash_to_hashX(scripthash) + return await self.confirmed_and_unconfirmed_history(hashX) + + async def scripthash_get_mempool(self, scripthash): + '''Return the mempool transactions touching a scripthash.''' + hashX = scripthash_to_hashX(scripthash) + return await self.unconfirmed_history(hashX) + + async def scripthash_listunspent(self, scripthash): + '''Return the list of UTXOs of a scripthash.''' + hashX = scripthash_to_hashX(scripthash) + return await self.hashX_listunspent(hashX) + + async def scripthash_subscribe(self, scripthash): + '''Subscribe to a script hash. 
+ + scripthash: the SHA256 hash of the script to subscribe to''' + hashX = scripthash_to_hashX(scripthash) + return await self.hashX_subscribe(hashX, scripthash) + + async def _merkle_proof(self, cp_height, height): + max_height = self.db.db_height + if not height <= cp_height <= max_height: + raise RPCError(BAD_REQUEST, + f'require header height {height:,d} <= ' + f'cp_height {cp_height:,d} <= ' + f'chain height {max_height:,d}') + branch, root = await self.db.header_branch_and_root(cp_height + 1, + height) + return { + 'branch': [hash_to_hex_str(elt) for elt in branch], + 'root': hash_to_hex_str(root), + } + + async def block_header(self, height, cp_height=0): + '''Return a raw block header as a hexadecimal string, or as a + dictionary with a merkle proof.''' + height = non_negative_integer(height) + cp_height = non_negative_integer(cp_height) + raw_header_hex = (await self.session_mgr.raw_header(height)).hex() + if cp_height == 0: + return raw_header_hex + result = {'header': raw_header_hex} + result.update(await self._merkle_proof(cp_height, height)) + return result + + async def block_header_13(self, height): + '''Return a raw block header as a hexadecimal string. + + height: the header's height''' + return await self.block_header(height) + + async def block_headers(self, start_height, count, cp_height=0): + '''Return count concatenated block headers as hex for the main chain; + starting at start_height. + + start_height and count must be non-negative integers. At most + MAX_CHUNK_SIZE headers will be returned. + ''' + start_height = non_negative_integer(start_height) + count = non_negative_integer(count) + cp_height = non_negative_integer(cp_height) + + max_size = self.MAX_CHUNK_SIZE + count = min(count, max_size) + headers, count = await self.db.read_headers(start_height, count) + result = {'hex': headers.hex(), 'count': count, 'max': max_size} + if count and cp_height: + last_height = start_height + count - 1 + result.update(await self._merkle_proof(cp_height, last_height)) + return result + + async def block_headers_12(self, start_height, count): + return await self.block_headers(start_height, count) + + async def block_get_chunk(self, index): + '''Return a chunk of block headers as a hexadecimal string. + + index: the chunk index''' + index = non_negative_integer(index) + size = self.coin.CHUNK_SIZE + start_height = index * size + headers, _ = await self.db.read_headers(start_height, size) + return headers.hex() + + async def block_get_header(self, height): + '''The deserialized header at a given height. 
+ + height: the header's height''' + height = non_negative_integer(height) + return await self.session_mgr.electrum_header(height) + + def is_tor(self): + '''Try to detect if the connection is to a tor hidden service we are + running.''' + peername = self.peer_mgr.proxy_peername() + if not peername: + return False + peer_address = self.peer_address() + return peer_address and peer_address[0] == peername[0] + + async def replaced_banner(self, banner): + network_info = await self.daemon_request('getnetworkinfo') + ni_version = network_info['version'] + major, minor = divmod(ni_version, 1000000) + minor, revision = divmod(minor, 10000) + revision //= 100 + daemon_version = '{:d}.{:d}.{:d}'.format(major, minor, revision) + for pair in [ + ('$SERVER_VERSION', torba.__version__), + ('$DAEMON_VERSION', daemon_version), + ('$DAEMON_SUBVERSION', network_info['subversion']), + ('$DONATION_ADDRESS', self.env.donation_address), + ]: + banner = banner.replace(*pair) + return banner + + async def donation_address(self): + '''Return the donation address as a string, empty if there is none.''' + return self.env.donation_address + + async def banner(self): + '''Return the server banner text.''' + banner = f'You are connected to an {torba.__version__} server.' + + if self.is_tor(): + banner_file = self.env.tor_banner_file + else: + banner_file = self.env.banner_file + if banner_file: + try: + with codecs.open(banner_file, 'r', 'utf-8') as f: + banner = f.read() + except Exception as e: + self.logger.error(f'reading banner file {banner_file}: {e!r}') + else: + banner = await self.replaced_banner(banner) + + return banner + + async def relayfee(self): + '''The minimum fee a low-priority tx must pay in order to be accepted + to the daemon's memory pool.''' + return await self.daemon_request('relayfee') + + async def estimatefee(self, number): + '''The estimated transaction fee per kilobyte to be paid for a + transaction to be included within a certain number of blocks. + + number: the number of blocks + ''' + number = non_negative_integer(number) + return await self.daemon_request('estimatefee', number) + + async def ping(self): + '''Serves as a connection keep-alive mechanism and for the client to + confirm the server is still responding. + ''' + return None + + async def server_version(self, client_name='', protocol_version=None): + '''Returns the server version as a string. + + client_name: a string identifying the client + protocol_version: the protocol version spoken by the client + ''' + if self.sv_seen and self.protocol_tuple >= (1, 4): + raise RPCError(BAD_REQUEST, f'server.version already sent') + self.sv_seen = True + + if client_name: + client_name = str(client_name) + if self.env.drop_client is not None and \ + self.env.drop_client.match(client_name): + self.close_after_send = True + raise RPCError(BAD_REQUEST, + f'unsupported client: {client_name}') + self.client = client_name[:17] + + # Find the highest common protocol version. Disconnect if + # that protocol version in unsupported. 
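+        # For instance, with PROTOCOL_MIN (1, 1) and PROTOCOL_MAX (1, 4) a
+        # client announcing '1.4' would negotiate ptuple == (1, 4), while a
+        # client that only speaks something newer (say '2.0') would get
+        # ptuple == None and be disconnected below.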
+ ptuple, client_min = util.protocol_version( + protocol_version, self.PROTOCOL_MIN, self.PROTOCOL_MAX) + if ptuple is None: + if client_min > self.PROTOCOL_MIN: + self.logger.info(f'client requested future protocol version ' + f'{util.version_string(client_min)} ' + f'- is your software out of date?') + self.close_after_send = True + raise RPCError(BAD_REQUEST, + f'unsupported protocol version: {protocol_version}') + self.set_request_handlers(ptuple) + + return torba.__version__, self.protocol_version_string() + + async def transaction_broadcast(self, raw_tx): + '''Broadcast a raw transaction to the network. + + raw_tx: the raw transaction as a hexadecimal string''' + # This returns errors as JSON RPC errors, as is natural + try: + hex_hash = await self.session_mgr.broadcast_transaction(raw_tx) + self.txs_sent += 1 + self.logger.info(f'sent tx: {hex_hash}') + return hex_hash + except DaemonError as e: + error, = e.args + message = error['message'] + self.logger.info(f'error sending transaction: {message}') + raise RPCError(BAD_REQUEST, 'the transaction was rejected by ' + f'network rules.\n\n{message}\n[{raw_tx}]') + + async def transaction_get(self, tx_hash, verbose=False): + '''Return the serialized raw transaction given its hash + + tx_hash: the transaction hash as a hexadecimal string + verbose: passed on to the daemon + ''' + assert_tx_hash(tx_hash) + if verbose not in (True, False): + raise RPCError(BAD_REQUEST, f'"verbose" must be a boolean') + + return await self.daemon_request('getrawtransaction', tx_hash, verbose) + + async def _block_hash_and_tx_hashes(self, height): + '''Returns a pair (block_hash, tx_hashes) for the main chain block at + the given height. + + block_hash is a hexadecimal string, and tx_hashes is an + ordered list of hexadecimal strings. + ''' + height = non_negative_integer(height) + hex_hashes = await self.daemon_request('block_hex_hashes', height, 1) + block_hash = hex_hashes[0] + block = await self.daemon_request('deserialised_block', block_hash) + return block_hash, block['tx'] + + def _get_merkle_branch(self, tx_hashes, tx_pos): + '''Return a merkle branch to a transaction. + + tx_hashes: ordered list of hex strings of tx hashes in a block + tx_pos: index of transaction in tx_hashes to create branch for + ''' + hashes = [hex_str_to_hash(hash) for hash in tx_hashes] + branch, root = self.db.merkle.branch_and_root(hashes, tx_pos) + branch = [hash_to_hex_str(hash) for hash in branch] + return branch + + async def transaction_merkle(self, tx_hash, height): + '''Return the markle branch to a confirmed transaction given its hash + and height. + + tx_hash: the transaction hash as a hexadecimal string + height: the height of the block it is in + ''' + assert_tx_hash(tx_hash) + block_hash, tx_hashes = await self._block_hash_and_tx_hashes(height) + try: + pos = tx_hashes.index(tx_hash) + except ValueError: + raise RPCError(BAD_REQUEST, f'tx hash {tx_hash} not in ' + f'block {block_hash} at height {height:,d}') + branch = self._get_merkle_branch(tx_hashes, pos) + return {"block_height": height, "merkle": branch, "pos": pos} + + async def transaction_id_from_pos(self, height, tx_pos, merkle=False): + '''Return the txid and optionally a merkle proof, given + a block height and position in the block. 
+ ''' + tx_pos = non_negative_integer(tx_pos) + if merkle not in (True, False): + raise RPCError(BAD_REQUEST, f'"merkle" must be a boolean') + + block_hash, tx_hashes = await self._block_hash_and_tx_hashes(height) + try: + tx_hash = tx_hashes[tx_pos] + except IndexError: + raise RPCError(BAD_REQUEST, f'no tx at position {tx_pos:,d} in ' + f'block {block_hash} at height {height:,d}') + + if merkle: + branch = self._get_merkle_branch(tx_hashes, tx_pos) + return {"tx_hash": tx_hash, "merkle": branch} + else: + return tx_hash + + def set_request_handlers(self, ptuple): + self.protocol_tuple = ptuple + + handlers = { + 'blockchain.block.get_chunk': self.block_get_chunk, + 'blockchain.block.get_header': self.block_get_header, + 'blockchain.estimatefee': self.estimatefee, + 'blockchain.relayfee': self.relayfee, + 'blockchain.scripthash.get_balance': self.scripthash_get_balance, + 'blockchain.scripthash.get_history': self.scripthash_get_history, + 'blockchain.scripthash.get_mempool': self.scripthash_get_mempool, + 'blockchain.scripthash.listunspent': self.scripthash_listunspent, + 'blockchain.scripthash.subscribe': self.scripthash_subscribe, + 'blockchain.transaction.broadcast': self.transaction_broadcast, + 'blockchain.transaction.get': self.transaction_get, + 'blockchain.transaction.get_merkle': self.transaction_merkle, + 'server.add_peer': self.add_peer, + 'server.banner': self.banner, + 'server.donation_address': self.donation_address, + 'server.features': self.server_features_async, + 'server.peers.subscribe': self.peers_subscribe, + 'server.version': self.server_version, + } + + if ptuple >= (1, 2): + # New handler as of 1.2 + handlers.update({ + 'mempool.get_fee_histogram': + self.mempool.compact_fee_histogram, + 'blockchain.block.headers': self.block_headers_12, + 'server.ping': self.ping, + }) + + if ptuple >= (1, 4): + handlers.update({ + 'blockchain.block.header': self.block_header, + 'blockchain.block.headers': self.block_headers, + 'blockchain.headers.subscribe': self.headers_subscribe, + 'blockchain.transaction.id_from_pos': + self.transaction_id_from_pos, + }) + elif ptuple >= (1, 3): + handlers.update({ + 'blockchain.block.header': self.block_header_13, + 'blockchain.headers.subscribe': self.headers_subscribe_True, + }) + else: + handlers.update({ + 'blockchain.headers.subscribe': self.headers_subscribe_False, + 'blockchain.address.get_balance': self.address_get_balance, + 'blockchain.address.get_history': self.address_get_history, + 'blockchain.address.get_mempool': self.address_get_mempool, + 'blockchain.address.listunspent': self.address_listunspent, + 'blockchain.address.subscribe': self.address_subscribe, + }) + + self.request_handlers = handlers + + +class LocalRPC(SessionBase): + '''A local TCP RPC server session.''' + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.client = 'RPC' + self.connection._max_response_size = 0 + + def protocol_version_string(self): + return 'RPC' + + +class DashElectrumX(ElectrumX): + '''A TCP server that handles incoming Electrum Dash connections.''' + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.mns = set() + + def set_request_handlers(self, ptuple): + super().set_request_handlers(ptuple) + self.request_handlers.update({ + 'masternode.announce.broadcast': + self.masternode_announce_broadcast, + 'masternode.subscribe': self.masternode_subscribe, + 'masternode.list': self.masternode_list + }) + + async def notify(self, touched, height_changed): + '''Notify the client about 
changes in masternode list.''' + await super().notify(touched, height_changed) + for mn in self.mns: + status = await self.daemon_request('masternode_list', + ['status', mn]) + await self.send_notification('masternode.subscribe', + [mn, status.get(mn)]) + + # Masternode command handlers + async def masternode_announce_broadcast(self, signmnb): + '''Pass through the masternode announce message to be broadcast + by the daemon. + + signmnb: signed masternode broadcast message.''' + try: + return await self.daemon_request('masternode_broadcast', + ['relay', signmnb]) + except DaemonError as e: + error, = e.args + message = error['message'] + self.logger.info(f'masternode_broadcast: {message}') + raise RPCError(BAD_REQUEST, 'the masternode broadcast was ' + f'rejected.\n\n{message}\n[{signmnb}]') + + async def masternode_subscribe(self, collateral): + '''Returns the status of masternode. + + collateral: masternode collateral. + ''' + result = await self.daemon_request('masternode_list', + ['status', collateral]) + if result is not None: + self.mns.add(collateral) + return result.get(collateral) + return None + + async def masternode_list(self, payees): + ''' + Returns the list of masternodes. + + payees: a list of masternode payee addresses. + ''' + if not isinstance(payees, list): + raise RPCError(BAD_REQUEST, 'expected a list of payees') + + def get_masternode_payment_queue(mns): + '''Returns the calculated position in the payment queue for all the + valid masterernodes in the given mns list. + + mns: a list of masternodes information. + ''' + now = int(datetime.datetime.utcnow().strftime("%s")) + mn_queue = [] + + # Only ENABLED masternodes are considered for the list. + for line in mns: + mnstat = mns[line].split() + if mnstat[0] == 'ENABLED': + # if last paid time == 0 + if int(mnstat[5]) == 0: + # use active seconds + mnstat.append(int(mnstat[4])) + else: + # now minus last paid + delta = now - int(mnstat[5]) + # if > active seconds, use active seconds + if delta >= int(mnstat[4]): + mnstat.append(int(mnstat[4])) + # use active seconds + else: + mnstat.append(delta) + mn_queue.append(mnstat) + mn_queue = sorted(mn_queue, key=lambda x: x[8], reverse=True) + return mn_queue + + def get_payment_position(payment_queue, address): + ''' + Returns the position of the payment list for the given address. + + payment_queue: position in the payment queue for the masternode. + address: masternode payee address. + ''' + position = -1 + for pos, mn in enumerate(payment_queue, start=1): + if mn[2] == address: + position = pos + break + return position + + # Accordingly with the masternode payment queue, a custom list + # with the masternode information including the payment + # position is returned. 
+ cache = self.session_mgr.mn_cache + if not cache or self.session_mgr.mn_cache_height != self.db.db_height: + full_mn_list = await self.daemon_request('masternode_list', + ['full']) + mn_payment_queue = get_masternode_payment_queue(full_mn_list) + mn_payment_count = len(mn_payment_queue) + mn_list = [] + for key, value in full_mn_list.items(): + mn_data = value.split() + mn_info = {} + mn_info['vin'] = key + mn_info['status'] = mn_data[0] + mn_info['protocol'] = mn_data[1] + mn_info['payee'] = mn_data[2] + mn_info['lastseen'] = mn_data[3] + mn_info['activeseconds'] = mn_data[4] + mn_info['lastpaidtime'] = mn_data[5] + mn_info['lastpaidblock'] = mn_data[6] + mn_info['ip'] = mn_data[7] + mn_info['paymentposition'] = get_payment_position( + mn_payment_queue, mn_info['payee']) + mn_info['inselection'] = ( + mn_info['paymentposition'] < mn_payment_count // 10) + balance = await self.address_get_balance(mn_info['payee']) + mn_info['balance'] = (sum(balance.values()) + / self.coin.VALUE_PER_COIN) + mn_list.append(mn_info) + cache.clear() + cache.extend(mn_list) + self.session_mgr.mn_cache_height = self.db.db_height + + # If payees is an empty list the whole masternode list is returned + if payees: + return [mn for mn in cache if mn['payee'] in payees] + else: + return cache diff --git a/torba/server/storage.py b/torba/server/storage.py new file mode 100644 index 000000000..859b26ffd --- /dev/null +++ b/torba/server/storage.py @@ -0,0 +1,166 @@ +# Copyright (c) 2016-2017, the ElectrumX authors +# +# All rights reserved. +# +# See the file "LICENCE" for information about the copyright +# and warranty status of this software. + +'''Backend database abstraction.''' + +import os +from functools import partial + +import torba.server.util as util + + +def db_class(name): + '''Returns a DB engine class.''' + for db_class in util.subclasses(Storage): + if db_class.__name__.lower() == name.lower(): + db_class.import_module() + return db_class + raise RuntimeError('unrecognised DB engine "{}"'.format(name)) + + +class Storage(object): + '''Abstract base class of the DB backend abstraction.''' + + def __init__(self, name, for_sync): + self.is_new = not os.path.exists(name) + self.for_sync = for_sync or self.is_new + self.open(name, create=self.is_new) + + @classmethod + def import_module(cls): + '''Import the DB engine module.''' + raise NotImplementedError + + def open(self, name, create): + '''Open an existing database or create a new one.''' + raise NotImplementedError + + def close(self): + '''Close an existing database.''' + raise NotImplementedError + + def get(self, key): + raise NotImplementedError + + def put(self, key, value): + raise NotImplementedError + + def write_batch(self): + '''Return a context manager that provides `put` and `delete`. + + Changes should only be committed when the context manager + closes without an exception. + ''' + raise NotImplementedError + + def iterator(self, prefix=b'', reverse=False): + '''Return an iterator that yields (key, value) pairs from the + database sorted by key. + + If `prefix` is set, only keys starting with `prefix` will be + included. If `reverse` is True the items are returned in + reverse order. 
+ ''' + raise NotImplementedError + + +class LevelDB(Storage): + '''LevelDB database engine.''' + + @classmethod + def import_module(cls): + import plyvel + cls.module = plyvel + + def open(self, name, create): + mof = 512 if self.for_sync else 128 + # Use snappy compression (the default) + self.db = self.module.DB(name, create_if_missing=create, + max_open_files=mof) + self.close = self.db.close + self.get = self.db.get + self.put = self.db.put + self.iterator = self.db.iterator + self.write_batch = partial(self.db.write_batch, transaction=True, + sync=True) + + +class RocksDB(Storage): + '''RocksDB database engine.''' + + @classmethod + def import_module(cls): + import rocksdb + cls.module = rocksdb + + def open(self, name, create): + mof = 512 if self.for_sync else 128 + # Use snappy compression (the default) + options = self.module.Options(create_if_missing=create, + use_fsync=True, + target_file_size_base=33554432, + max_open_files=mof) + self.db = self.module.DB(name, options) + self.get = self.db.get + self.put = self.db.put + + def close(self): + # PyRocksDB doesn't provide a close method; hopefully this is enough + self.db = self.get = self.put = None + import gc + gc.collect() + + def write_batch(self): + return RocksDBWriteBatch(self.db) + + def iterator(self, prefix=b'', reverse=False): + return RocksDBIterator(self.db, prefix, reverse) + + +class RocksDBWriteBatch(object): + '''A write batch for RocksDB.''' + + def __init__(self, db): + self.batch = RocksDB.module.WriteBatch() + self.db = db + + def __enter__(self): + return self.batch + + def __exit__(self, exc_type, exc_val, exc_tb): + if not exc_val: + self.db.write(self.batch) + + +class RocksDBIterator(object): + '''An iterator for RocksDB.''' + + def __init__(self, db, prefix, reverse): + self.prefix = prefix + if reverse: + self.iterator = reversed(db.iteritems()) + nxt_prefix = util.increment_byte_string(prefix) + if nxt_prefix: + self.iterator.seek(nxt_prefix) + try: + next(self.iterator) + except StopIteration: + self.iterator.seek(nxt_prefix) + else: + self.iterator.seek_to_last() + else: + self.iterator = db.iteritems() + self.iterator.seek(prefix) + + def __iter__(self): + return self + + def __next__(self): + k, v = next(self.iterator) + if not k.startswith(self.prefix): + raise StopIteration + return k, v diff --git a/torba/server/text.py b/torba/server/text.py new file mode 100644 index 000000000..409276f8e --- /dev/null +++ b/torba/server/text.py @@ -0,0 +1,82 @@ +import time + +import torba.server.util as util + + +def sessions_lines(data): + '''A generator returning lines for a list of sessions. + + data is the return value of rpc_sessions().''' + fmt = ('{:<6} {:<5} {:>17} {:>5} {:>5} {:>5} ' + '{:>7} {:>7} {:>7} {:>7} {:>7} {:>9} {:>21}') + yield fmt.format('ID', 'Flags', 'Client', 'Proto', + 'Reqs', 'Txs', 'Subs', + 'Recv', 'Recv KB', 'Sent', 'Sent KB', 'Time', 'Peer') + for (id_, flags, peer, client, proto, reqs, txs_sent, subs, + recv_count, recv_size, send_count, send_size, time) in data: + yield fmt.format(id_, flags, client, proto, + '{:,d}'.format(reqs), + '{:,d}'.format(txs_sent), + '{:,d}'.format(subs), + '{:,d}'.format(recv_count), + '{:,d}'.format(recv_size // 1024), + '{:,d}'.format(send_count), + '{:,d}'.format(send_size // 1024), + util.formatted_time(time, sep=''), peer) + + +def groups_lines(data): + '''A generator returning lines for a list of groups. 
+ + data is the return value of rpc_groups().''' + + fmt = ('{:<6} {:>9} {:>9} {:>6} {:>6} {:>8}' + '{:>7} {:>9} {:>7} {:>9}') + yield fmt.format('ID', 'Sessions', 'Bwidth KB', 'Reqs', 'Txs', 'Subs', + 'Recv', 'Recv KB', 'Sent', 'Sent KB') + for (id_, session_count, bandwidth, reqs, txs_sent, subs, + recv_count, recv_size, send_count, send_size) in data: + yield fmt.format(id_, + '{:,d}'.format(session_count), + '{:,d}'.format(bandwidth // 1024), + '{:,d}'.format(reqs), + '{:,d}'.format(txs_sent), + '{:,d}'.format(subs), + '{:,d}'.format(recv_count), + '{:,d}'.format(recv_size // 1024), + '{:,d}'.format(send_count), + '{:,d}'.format(send_size // 1024)) + + +def peers_lines(data): + '''A generator returning lines for a list of peers. + + data is the return value of rpc_peers().''' + def time_fmt(t): + if not t: + return 'Never' + return util.formatted_time(now - t) + + now = time.time() + fmt = ('{:<30} {:<6} {:>5} {:>5} {:<17} {:>4} ' + '{:>4} {:>8} {:>11} {:>11} {:>5} {:>20} {:<15}') + yield fmt.format('Host', 'Status', 'TCP', 'SSL', 'Server', 'Min', + 'Max', 'Pruning', 'Last Good', 'Last Try', + 'Tries', 'Source', 'IP Address') + for item in data: + features = item['features'] + hostname = item['host'] + host = features['hosts'][hostname] + yield fmt.format(hostname[:30], + item['status'], + host.get('tcp_port') or '', + host.get('ssl_port') or '', + features['server_version'] or 'unknown', + features['protocol_min'], + features['protocol_max'], + features['pruning'] or '', + time_fmt(item['last_good']), + time_fmt(item['last_try']), + item['try_count'], + item['source'][:20], + item['ip_addr'] or '') diff --git a/torba/server/tx.py b/torba/server/tx.py new file mode 100644 index 000000000..b1d578c7e --- /dev/null +++ b/torba/server/tx.py @@ -0,0 +1,625 @@ +# Copyright (c) 2016-2017, Neil Booth +# Copyright (c) 2017, the ElectrumX authors +# +# All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# and warranty status of this software. 
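+
+# For reference, the legacy (pre-segwit) wire layout parsed by
+# Deserializer.read_tx() below is: int32 LE version, varint input count,
+# inputs, varint output count, outputs, uint32 LE locktime.  Varints use the
+# usual Bitcoin encoding: values below 253 are a single byte, otherwise a
+# 0xfd/0xfe/0xff prefix introduces a 2/4/8 byte little-endian integer.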
+ +'''Transaction-related classes and functions.''' + +from collections import namedtuple + +from torba.server.hash import sha256, double_sha256, hash_to_hex_str +from torba.server.script import OpCodes +from torba.server.util import ( + unpack_le_int32_from, unpack_le_int64_from, unpack_le_uint16_from, + unpack_le_uint32_from, unpack_le_uint64_from, pack_le_int32, pack_varint, + pack_le_uint32, pack_le_int64, pack_varbytes, +) + +ZERO = bytes(32) +MINUS_1 = 4294967295 + + +class Tx(namedtuple("Tx", "version inputs outputs locktime")): + '''Class representing a transaction.''' + + def serialize(self): + return b''.join(( + pack_le_int32(self.version), + pack_varint(len(self.inputs)), + b''.join(tx_in.serialize() for tx_in in self.inputs), + pack_varint(len(self.outputs)), + b''.join(tx_out.serialize() for tx_out in self.outputs), + pack_le_uint32(self.locktime) + )) + + +class TxInput(namedtuple("TxInput", "prev_hash prev_idx script sequence")): + '''Class representing a transaction input.''' + def __str__(self): + script = self.script.hex() + prev_hash = hash_to_hex_str(self.prev_hash) + return ("Input({}, {:d}, script={}, sequence={:d})" + .format(prev_hash, self.prev_idx, script, self.sequence)) + + def is_generation(self): + '''Test if an input is generation/coinbase like''' + return self.prev_idx == MINUS_1 and self.prev_hash == ZERO + + def serialize(self): + return b''.join(( + self.prev_hash, + pack_le_uint32(self.prev_idx), + pack_varbytes(self.script), + pack_le_uint32(self.sequence), + )) + + +class TxOutput(namedtuple("TxOutput", "value pk_script")): + + def serialize(self): + return b''.join(( + pack_le_int64(self.value), + pack_varbytes(self.pk_script), + )) + + +class Deserializer(object): + '''Deserializes blocks into transactions. + + External entry points are read_tx(), read_tx_and_hash(), + read_tx_and_vsize() and read_block(). + + This code is performance sensitive as it is executed 100s of + millions of times during sync. + ''' + + TX_HASH_FN = staticmethod(double_sha256) + + def __init__(self, binary, start=0): + assert isinstance(binary, bytes) + self.binary = binary + self.binary_length = len(binary) + self.cursor = start + + def read_tx(self): + '''Return a deserialized transaction.''' + return Tx( + self._read_le_int32(), # version + self._read_inputs(), # inputs + self._read_outputs(), # outputs + self._read_le_uint32() # locktime + ) + + def read_tx_and_hash(self): + '''Return a (deserialized TX, tx_hash) pair. + + The hash needs to be reversed for human display; for efficiency + we process it in the natural serialized order. 
+ ''' + start = self.cursor + return self.read_tx(), self.TX_HASH_FN(self.binary[start:self.cursor]) + + def read_tx_and_vsize(self): + '''Return a (deserialized TX, vsize) pair.''' + return self.read_tx(), self.binary_length + + def read_tx_block(self): + '''Returns a list of (deserialized_tx, tx_hash) pairs.''' + read = self.read_tx_and_hash + # Some coins have excess data beyond the end of the transactions + return [read() for _ in range(self._read_varint())] + + def _read_inputs(self): + read_input = self._read_input + return [read_input() for i in range(self._read_varint())] + + def _read_input(self): + return TxInput( + self._read_nbytes(32), # prev_hash + self._read_le_uint32(), # prev_idx + self._read_varbytes(), # script + self._read_le_uint32() # sequence + ) + + def _read_outputs(self): + read_output = self._read_output + return [read_output() for i in range(self._read_varint())] + + def _read_output(self): + return TxOutput( + self._read_le_int64(), # value + self._read_varbytes(), # pk_script + ) + + def _read_byte(self): + cursor = self.cursor + self.cursor += 1 + return self.binary[cursor] + + def _read_nbytes(self, n): + cursor = self.cursor + self.cursor = end = cursor + n + assert self.binary_length >= end + return self.binary[cursor:end] + + def _read_varbytes(self): + return self._read_nbytes(self._read_varint()) + + def _read_varint(self): + n = self.binary[self.cursor] + self.cursor += 1 + if n < 253: + return n + if n == 253: + return self._read_le_uint16() + if n == 254: + return self._read_le_uint32() + return self._read_le_uint64() + + def _read_le_int32(self): + result, = unpack_le_int32_from(self.binary, self.cursor) + self.cursor += 4 + return result + + def _read_le_int64(self): + result, = unpack_le_int64_from(self.binary, self.cursor) + self.cursor += 8 + return result + + def _read_le_uint16(self): + result, = unpack_le_uint16_from(self.binary, self.cursor) + self.cursor += 2 + return result + + def _read_le_uint32(self): + result, = unpack_le_uint32_from(self.binary, self.cursor) + self.cursor += 4 + return result + + def _read_le_uint64(self): + result, = unpack_le_uint64_from(self.binary, self.cursor) + self.cursor += 8 + return result + + +class TxSegWit(namedtuple("Tx", "version marker flag inputs outputs " + "witness locktime")): + '''Class representing a SegWit transaction.''' + + +class DeserializerSegWit(Deserializer): + + # https://bitcoincore.org/en/segwit_wallet_dev/#transaction-serialization + + def _read_witness(self, fields): + read_witness_field = self._read_witness_field + return [read_witness_field() for i in range(fields)] + + def _read_witness_field(self): + read_varbytes = self._read_varbytes + return [read_varbytes() for i in range(self._read_varint())] + + def _read_tx_parts(self): + '''Return a (deserialized TX, tx_hash, vsize) tuple.''' + start = self.cursor + marker = self.binary[self.cursor + 4] + if marker: + tx = super().read_tx() + tx_hash = self.TX_HASH_FN(self.binary[start:self.cursor]) + return tx, tx_hash, self.binary_length + + # Ugh, this is nasty. 
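+        # For context: per BIP 144, a segwit transaction places a zero byte
+        # (the "marker") where a legacy transaction would have its input
+        # count, followed by a flag byte (0x01), the usual inputs/outputs,
+        # one witness stack per input, and finally the locktime.  The txid is
+        # still computed over the serialization without the marker, flag and
+        # witness data, which is why orig_ser below splices those parts out.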
+ version = self._read_le_int32() + orig_ser = self.binary[start:self.cursor] + + marker = self._read_byte() + flag = self._read_byte() + + start = self.cursor + inputs = self._read_inputs() + outputs = self._read_outputs() + orig_ser += self.binary[start:self.cursor] + + base_size = self.cursor - start + witness = self._read_witness(len(inputs)) + + start = self.cursor + locktime = self._read_le_uint32() + orig_ser += self.binary[start:self.cursor] + vsize = (3 * base_size + self.binary_length) // 4 + + return TxSegWit(version, marker, flag, inputs, outputs, witness, + locktime), self.TX_HASH_FN(orig_ser), vsize + + def read_tx(self): + return self._read_tx_parts()[0] + + def read_tx_and_hash(self): + tx, tx_hash, vsize = self._read_tx_parts() + return tx, tx_hash + + def read_tx_and_vsize(self): + tx, tx_hash, vsize = self._read_tx_parts() + return tx, vsize + + +class DeserializerAuxPow(Deserializer): + VERSION_AUXPOW = (1 << 8) + + def read_header(self, height, static_header_size): + '''Return the AuxPow block header bytes''' + start = self.cursor + version = self._read_le_uint32() + if version & self.VERSION_AUXPOW: + # We are going to calculate the block size then read it as bytes + self.cursor = start + self.cursor += static_header_size # Block normal header + self.read_tx() # AuxPow transaction + self.cursor += 32 # Parent block hash + merkle_size = self._read_varint() + self.cursor += 32 * merkle_size # Merkle branch + self.cursor += 4 # Index + merkle_size = self._read_varint() + self.cursor += 32 * merkle_size # Chain merkle branch + self.cursor += 4 # Chain index + self.cursor += 80 # Parent block header + header_end = self.cursor + else: + header_end = static_header_size + self.cursor = start + return self._read_nbytes(header_end) + + +class DeserializerAuxPowSegWit(DeserializerSegWit, DeserializerAuxPow): + pass + + +class DeserializerEquihash(Deserializer): + def read_header(self, height, static_header_size): + '''Return the block header bytes''' + start = self.cursor + # We are going to calculate the block size then read it as bytes + self.cursor += static_header_size + solution_size = self._read_varint() + self.cursor += solution_size + header_end = self.cursor + self.cursor = start + return self._read_nbytes(header_end) + + +class DeserializerEquihashSegWit(DeserializerSegWit, DeserializerEquihash): + pass + + +class TxJoinSplit(namedtuple("Tx", "version inputs outputs locktime")): + '''Class representing a JoinSplit transaction.''' + + +class DeserializerZcash(DeserializerEquihash): + def read_tx(self): + header = self._read_le_uint32() + overwintered = ((header >> 31) == 1) + if overwintered: + version = header & 0x7fffffff + self.cursor += 4 # versionGroupId + else: + version = header + + is_overwinter_v3 = version == 3 + is_sapling_v4 = version == 4 + + base_tx = TxJoinSplit( + version, + self._read_inputs(), # inputs + self._read_outputs(), # outputs + self._read_le_uint32() # locktime + ) + + if is_overwinter_v3 or is_sapling_v4: + self.cursor += 4 # expiryHeight + + has_shielded = False + if is_sapling_v4: + self.cursor += 8 # valueBalance + shielded_spend_size = self._read_varint() + self.cursor += shielded_spend_size * 384 # vShieldedSpend + shielded_output_size = self._read_varint() + self.cursor += shielded_output_size * 948 # vShieldedOutput + has_shielded = shielded_spend_size > 0 or shielded_output_size > 0 + + if base_tx.version >= 2: + joinsplit_size = self._read_varint() + if joinsplit_size > 0: + joinsplit_desc_len = 1506 + (192 if is_sapling_v4 else 
296) + # JSDescription + self.cursor += joinsplit_size * joinsplit_desc_len + self.cursor += 32 # joinSplitPubKey + self.cursor += 64 # joinSplitSig + + if is_sapling_v4 and has_shielded: + self.cursor += 64 # bindingSig + + return base_tx + + +class TxTime(namedtuple("Tx", "version time inputs outputs locktime")): + '''Class representing transaction that has a time field.''' + + +class DeserializerTxTime(Deserializer): + def read_tx(self): + return TxTime( + self._read_le_int32(), # version + self._read_le_uint32(), # time + self._read_inputs(), # inputs + self._read_outputs(), # outputs + self._read_le_uint32(), # locktime + ) + + +class DeserializerReddcoin(Deserializer): + def read_tx(self): + version = self._read_le_int32() + inputs = self._read_inputs() + outputs = self._read_outputs() + locktime = self._read_le_uint32() + if version > 1: + time = self._read_le_uint32() + else: + time = 0 + + return TxTime(version, time, inputs, outputs, locktime) + + +class DeserializerTxTimeAuxPow(DeserializerTxTime): + VERSION_AUXPOW = (1 << 8) + + def is_merged_block(self): + start = self.cursor + self.cursor = 0 + version = self._read_le_uint32() + self.cursor = start + if version & self.VERSION_AUXPOW: + return True + return False + + def read_header(self, height, static_header_size): + '''Return the AuxPow block header bytes''' + start = self.cursor + version = self._read_le_uint32() + if version & self.VERSION_AUXPOW: + # We are going to calculate the block size then read it as bytes + self.cursor = start + self.cursor += static_header_size # Block normal header + self.read_tx() # AuxPow transaction + self.cursor += 32 # Parent block hash + merkle_size = self._read_varint() + self.cursor += 32 * merkle_size # Merkle branch + self.cursor += 4 # Index + merkle_size = self._read_varint() + self.cursor += 32 * merkle_size # Chain merkle branch + self.cursor += 4 # Chain index + self.cursor += 80 # Parent block header + header_end = self.cursor + else: + header_end = static_header_size + self.cursor = start + return self._read_nbytes(header_end) + + +class DeserializerBitcoinAtom(DeserializerSegWit): + FORK_BLOCK_HEIGHT = 505888 + + def read_header(self, height, static_header_size): + '''Return the block header bytes''' + header_len = static_header_size + if height >= self.FORK_BLOCK_HEIGHT: + header_len += 4 # flags + return self._read_nbytes(header_len) + + +class DeserializerGroestlcoin(DeserializerSegWit): + TX_HASH_FN = staticmethod(sha256) + + +class TxInputTokenPay(TxInput): + '''Class representing a TokenPay transaction input.''' + + OP_ANON_MARKER = 0xb9 + # 2byte marker (cpubkey + sigc + sigr) + MIN_ANON_IN_SIZE = 2 + (33 + 32 + 32) + + def _is_anon_input(self): + return (len(self.script) >= self.MIN_ANON_IN_SIZE and + self.script[0] == OpCodes.OP_RETURN and + self.script[1] == self.OP_ANON_MARKER) + + def is_generation(self): + # Transactions comming in from stealth addresses are seen by + # the blockchain as newly minted coins. The reverse, where coins + # are sent TO a stealth address, are seen by the blockchain as + # a coin burn. 
+ if self._is_anon_input(): + return True + return super(TxInputTokenPay, self).is_generation() + + +class TxInputTokenPayStealth( + namedtuple("TxInput", "keyimage ringsize script sequence")): + '''Class representing a TokenPay stealth transaction input.''' + + def __str__(self): + script = self.script.hex() + keyimage = bytes(self.keyimage).hex() + return ("Input({}, {:d}, script={}, sequence={:d})" + .format(keyimage, self.ringsize[1], script, self.sequence)) + + def is_generation(self): + return True + + def serialize(self): + return b''.join(( + self.keyimage, + self.ringsize, + pack_varbytes(self.script), + pack_le_uint32(self.sequence), + )) + + +class DeserializerTokenPay(DeserializerTxTime): + + def _read_input(self): + txin = TxInputTokenPay( + self._read_nbytes(32), # prev_hash + self._read_le_uint32(), # prev_idx + self._read_varbytes(), # script + self._read_le_uint32(), # sequence + ) + if txin._is_anon_input(): + # Not sure if this is actually needed, and seems + # extra work for no immediate benefit, but it at + # least correctly represents a stealth input + raw = txin.serialize() + deserializer = Deserializer(raw) + txin = TxInputTokenPayStealth( + deserializer._read_nbytes(33), # keyimage + deserializer._read_nbytes(3), # ringsize + deserializer._read_varbytes(), # script + deserializer._read_le_uint32() # sequence + ) + return txin + + +# Decred +class TxInputDcr(namedtuple("TxInput", "prev_hash prev_idx tree sequence")): + '''Class representing a Decred transaction input.''' + + def __str__(self): + prev_hash = hash_to_hex_str(self.prev_hash) + return ("Input({}, {:d}, tree={}, sequence={:d})" + .format(prev_hash, self.prev_idx, self.tree, self.sequence)) + + def is_generation(self): + '''Test if an input is generation/coinbase like''' + return self.prev_idx == MINUS_1 and self.prev_hash == ZERO + + +class TxOutputDcr(namedtuple("TxOutput", "value version pk_script")): + '''Class representing a Decred transaction output.''' + pass + + +class TxDcr(namedtuple("Tx", "version inputs outputs locktime expiry " + "witness")): + '''Class representing a Decred transaction.''' + + +class DeserializerDecred(Deserializer): + @staticmethod + def blake256(data): + from blake256.blake256 import blake_hash + return blake_hash(data) + + @staticmethod + def blake256d(data): + from blake256.blake256 import blake_hash + return blake_hash(blake_hash(data)) + + def read_tx(self): + return self._read_tx_parts(produce_hash=False)[0] + + def read_tx_and_hash(self): + tx, tx_hash, vsize = self._read_tx_parts() + return tx, tx_hash + + def read_tx_and_vsize(self): + tx, tx_hash, vsize = self._read_tx_parts(produce_hash=False) + return tx, vsize + + def read_tx_block(self): + '''Returns a list of (deserialized_tx, tx_hash) pairs.''' + read = self.read_tx_and_hash + txs = [read() for _ in range(self._read_varint())] + stxs = [read() for _ in range(self._read_varint())] + return txs + stxs + + def read_tx_tree(self): + '''Returns a list of deserialized_tx without tx hashes.''' + read_tx = self.read_tx + return [read_tx() for _ in range(self._read_varint())] + + def _read_input(self): + return TxInputDcr( + self._read_nbytes(32), # prev_hash + self._read_le_uint32(), # prev_idx + self._read_byte(), # tree + self._read_le_uint32(), # sequence + ) + + def _read_output(self): + return TxOutputDcr( + self._read_le_int64(), # value + self._read_le_uint16(), # version + self._read_varbytes(), # pk_script + ) + + def _read_witness(self, fields): + read_witness_field = self._read_witness_field + assert 
fields == self._read_varint() + return [read_witness_field() for _ in range(fields)] + + def _read_witness_field(self): + value_in = self._read_le_int64() + block_height = self._read_le_uint32() + block_index = self._read_le_uint32() + script = self._read_varbytes() + return value_in, block_height, block_index, script + + def _read_tx_parts(self, produce_hash=True): + start = self.cursor + version = self._read_le_int32() + inputs = self._read_inputs() + outputs = self._read_outputs() + locktime = self._read_le_uint32() + expiry = self._read_le_uint32() + end_prefix = self.cursor + witness = self._read_witness(len(inputs)) + + if produce_hash: + # TxSerializeNoWitness << 16 == 0x10000 + no_witness_header = pack_le_uint32(0x10000 | (version & 0xffff)) + prefix_tx = no_witness_header + self.binary[start+4:end_prefix] + tx_hash = self.blake256(prefix_tx) + else: + tx_hash = None + + return TxDcr( + version, + inputs, + outputs, + locktime, + expiry, + witness + ), tx_hash, self.cursor - start diff --git a/torba/server/util.py b/torba/server/util.py new file mode 100644 index 000000000..37694373c --- /dev/null +++ b/torba/server/util.py @@ -0,0 +1,359 @@ +# Copyright (c) 2016-2017, Neil Booth +# +# All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# and warranty status of this software. + +'''Miscellaneous utility classes and functions.''' + + +import array +import inspect +from ipaddress import ip_address +import logging +import re +import sys +from collections import Container, Mapping +from struct import pack, Struct + +# Logging utilities + + +class ConnectionLogger(logging.LoggerAdapter): + '''Prepends a connection identifier to a logging message.''' + def process(self, msg, kwargs): + conn_id = self.extra.get('conn_id', 'unknown') + return f'[{conn_id}] {msg}', kwargs + + +class CompactFormatter(logging.Formatter): + '''Strips the module from the logger name to leave the class only.''' + def format(self, record): + record.name = record.name.rpartition('.')[-1] + return super().format(record) + + +def make_logger(name, *, handler, level): + '''Return the root ElectrumX logger.''' + logger = logging.getLogger(name) + logger.addHandler(handler) + logger.setLevel(logging.INFO) + logger.propagate = False + return logger + + +def class_logger(path, classname): + '''Return a hierarchical logger for a class.''' + return logging.getLogger(path).getChild(classname) + + +# Method decorator. 
To be used for calculations that will always +# deliver the same result. The method cannot take any arguments +# and should be accessed as an attribute. +class cachedproperty(object): + + def __init__(self, f): + self.f = f + + def __get__(self, obj, type): + obj = obj or type + value = self.f(obj) + setattr(obj, self.f.__name__, value) + return value + + +def formatted_time(t, sep=' '): + '''Return a number of seconds as a string in days, hours, mins and + maybe secs.''' + t = int(t) + fmts = (('{:d}d', 86400), ('{:02d}h', 3600), ('{:02d}m', 60)) + parts = [] + for fmt, n in fmts: + val = t // n + if parts or val: + parts.append(fmt.format(val)) + t %= n + if len(parts) < 3: + parts.append('{:02d}s'.format(t)) + return sep.join(parts) + + +def deep_getsizeof(obj): + """Find the memory footprint of a Python object. + + Based on code from code.tutsplus.com: http://goo.gl/fZ0DXK + + This is a recursive function that drills down a Python object graph + like a dictionary holding nested dictionaries with lists of lists + and tuples and sets. + + The sys.getsizeof function does a shallow size of only. It counts each + object inside a container as pointer only regardless of how big it + really is. + """ + + ids = set() + + def size(o): + if id(o) in ids: + return 0 + + r = sys.getsizeof(o) + ids.add(id(o)) + + if isinstance(o, (str, bytes, bytearray, array.array)): + return r + + if isinstance(o, Mapping): + return r + sum(size(k) + size(v) for k, v in o.items()) + + if isinstance(o, Container): + return r + sum(size(x) for x in o) + + return r + + return size(obj) + + +def subclasses(base_class, strict=True): + '''Return a list of subclasses of base_class in its module.''' + def select(obj): + return (inspect.isclass(obj) and issubclass(obj, base_class) and + (not strict or obj != base_class)) + + pairs = inspect.getmembers(sys.modules[base_class.__module__], select) + return [pair[1] for pair in pairs] + + +def chunks(items, size): + '''Break up items, an iterable, into chunks of length size.''' + for i in range(0, len(items), size): + yield items[i: i + size] + + +def resolve_limit(limit): + if limit is None: + return -1 + assert isinstance(limit, int) and limit >= 0 + return limit + + +def bytes_to_int(be_bytes): + '''Interprets a big-endian sequence of bytes as an integer''' + return int.from_bytes(be_bytes, 'big') + + +def int_to_bytes(value): + '''Converts an integer to a big-endian sequence of bytes''' + return value.to_bytes((value.bit_length() + 7) // 8, 'big') + + +def increment_byte_string(bs): + '''Return the lexicographically next byte string of the same length. + + Return None if there is none (when the input is all 0xff bytes).''' + for n in range(1, len(bs) + 1): + if bs[-n] != 0xff: + return bs[:-n] + bytes([bs[-n] + 1]) + bytes(n - 1) + return None + + +class LogicalFile(object): + '''A logical binary file split across several separate files on disk.''' + + def __init__(self, prefix, digits, file_size): + digit_fmt = '{' + ':0{:d}d'.format(digits) + '}' + self.filename_fmt = prefix + digit_fmt + self.file_size = file_size + + def read(self, start, size=-1): + '''Read up to size bytes from the virtual file, starting at offset + start, and return them. 
+
+        If size is -1 all bytes are read.'''
+        parts = []
+        while size != 0:
+            try:
+                with self.open_file(start, False) as f:
+                    part = f.read(size)
+                if not part:
+                    break
+            except FileNotFoundError:
+                break
+            parts.append(part)
+            start += len(part)
+            if size > 0:
+                size -= len(part)
+        return b''.join(parts)
+
+    def write(self, start, b):
+        '''Write the bytes-like object, b, to the underlying virtual file.'''
+        while b:
+            size = min(len(b), self.file_size - (start % self.file_size))
+            with self.open_file(start, True) as f:
+                f.write(b if size == len(b) else b[:size])
+            b = b[size:]
+            start += size
+
+    def open_file(self, start, create):
+        '''Open the virtual file and seek to start. Return a file handle.
+        Raise FileNotFoundError if the file does not exist and create
+        is False.
+        '''
+        file_num, offset = divmod(start, self.file_size)
+        filename = self.filename_fmt.format(file_num)
+        f = open_file(filename, create)
+        f.seek(offset)
+        return f
+
+
+def open_file(filename, create=False):
+    '''Open the file name. Return its handle.'''
+    try:
+        return open(filename, 'rb+')
+    except FileNotFoundError:
+        if create:
+            return open(filename, 'wb+')
+        raise
+
+
+def open_truncate(filename):
+    '''Open the file name. Return its handle.'''
+    return open(filename, 'wb+')
+
+
+def address_string(address):
+    '''Return an address as a correctly formatted string.'''
+    fmt = '{}:{:d}'
+    host, port = address
+    try:
+        host = ip_address(host)
+    except ValueError:
+        pass
+    else:
+        if host.version == 6:
+            fmt = '[{}]:{:d}'
+    return fmt.format(host, port)
+
+# See http://stackoverflow.com/questions/2532053/validate-a-hostname-string
+# Note underscores are valid in domain names, but strictly invalid in host
+# names. We ignore that distinction.
+
+
+SEGMENT_REGEX = re.compile("(?!-)[A-Z_\\d-]{1,63}(?<!-)$", re.IGNORECASE)
+
+
+def is_valid_hostname(hostname):
+    if len(hostname) > 255:
+        return False
+    # strip exactly one dot from the right, if present
+    if hostname and hostname[-1] == ".":
+        hostname = hostname[:-1]
+    return all(SEGMENT_REGEX.match(x) for x in hostname.split("."))
+
+
+def protocol_tuple(s):
+    '''Converts a protocol version number, such as "1.0" to a tuple (1, 0).
+
+    If the version number is bad, (0, ) indicating version 0 is returned.'''
+    try:
+        return tuple(int(part) for part in s.split('.'))
+    except Exception:
+        return (0, )
+
+
+def version_string(ptuple):
+    '''Convert a version tuple such as (1, 2) to "1.2".
+    There is always at least one dot, so (1, ) becomes "1.0".'''
+    while len(ptuple) < 2:
+        ptuple += (0, )
+    return '.'.join(str(p) for p in ptuple)
+
+
+def protocol_version(client_req, min_tuple, max_tuple):
+    '''Given a client's protocol version string, return a pair of
+    protocol tuples:
+
+      (negotiated version, client min request)
+
+    If the request is unsupported, the negotiated protocol tuple is
+    None.
+    '''
+    if client_req is None:
+        client_min = client_max = min_tuple
+    else:
+        if isinstance(client_req, list) and len(client_req) == 2:
+            client_min, client_max = client_req
+        else:
+            client_min = client_max = client_req
+        client_min = protocol_tuple(client_min)
+        client_max = protocol_tuple(client_max)
+
+    result = min(client_max, max_tuple)
+    if result < max(client_min, min_tuple) or result == (0, ):
+        result = None
+
+    return result, client_min
+
+
+struct_le_i = Struct('<i')
+struct_le_q = Struct('<q')
+struct_le_H = Struct('<H')
+struct_le_I = Struct('<I')
+struct_le_Q = Struct('<Q')
+struct_be_H = Struct('>H')
+struct_be_I = Struct('>I')
+structB = Struct('B')
+
+unpack_le_int32_from = struct_le_i.unpack_from
+unpack_le_int64_from = struct_le_q.unpack_from
+unpack_le_uint16_from = struct_le_H.unpack_from
+unpack_le_uint32_from = struct_le_I.unpack_from
+unpack_le_uint64_from = struct_le_Q.unpack_from
+unpack_be_uint16_from = struct_be_H.unpack_from
+unpack_be_uint32_from = struct_be_I.unpack_from
+
+pack_le_int32 = struct_le_i.pack
+pack_le_int64 = struct_le_q.pack
+pack_le_uint16 = struct_le_H.pack
+pack_le_uint32 = struct_le_I.pack
+pack_le_uint64 = struct_le_Q.pack
+pack_be_uint16 = struct_be_H.pack
+pack_be_uint32 = struct_be_I.pack
+pack_byte = structB.pack
+
+hex_to_bytes = bytes.fromhex
+
+
+def pack_varint(n):
+    if n < 253:
+        return pack_byte(n)
+    if n < 65536:
+        return pack_byte(253) + pack_le_uint16(n)
+    if n < 4294967296:
+        return pack_byte(254) + pack_le_uint32(n)
+    return pack_byte(255) + pack_le_uint64(n)
+
+
+def pack_varbytes(data):
+    return pack_varint(len(data)) + data
diff --git a/torba/testing/__init__.py b/torba/testing/__init__.py
new file mode 100644
index 000000000..7b2ed3e52
--- /dev/null
+++ b/torba/testing/__init__.py
@@ -0,0 +1 @@
+from .testcase import IntegrationTestCase
diff --git a/torba/testing/node.py b/torba/testing/node.py
new file mode 100644
index 000000000..e3580814c
--- /dev/null
+++ b/torba/testing/node.py
@@ -0,0 +1,344 @@
+import os
+import sys
+import shutil
+import asyncio
+import zipfile
+import tarfile
+import logging
+import tempfile
+import subprocess
+import importlib
+import requests
+from binascii import hexlify
+from typing import Type
+
+from torba.server.server import Server
+from torba.server.env import Env
+from torba.wallet import Wallet
+from torba.baseledger import BaseLedger, BlockHeightEvent
+from torba.basemanager import BaseWalletManager
+from torba.baseaccount import BaseAccount
+
+root = logging.getLogger()
+ch = logging.StreamHandler(sys.stdout)
+formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ch.setFormatter(formatter)
+root.addHandler(ch)
+
+
+def get_manager_from_environment(default_manager=BaseWalletManager):
+    if 'TORBA_MANAGER' not in os.environ:
+        return default_manager
+    module_name = os.environ['TORBA_MANAGER'].split('-')[-1]  # tox support
+    return importlib.import_module(module_name)
+
+
+def get_ledger_from_environment():
+    if 'TORBA_LEDGER' not in os.environ:
+        raise ValueError('Environment variable TORBA_LEDGER must point to a torba based ledger module.')
+    module_name = os.environ['TORBA_LEDGER'].split('-')[-1]  # tox support
+    return importlib.import_module(module_name)
+
+
+def get_spvserver_from_ledger(ledger_module):
+    spvserver_path, regtest_class_name = ledger_module.__spvserver__.rsplit('.', 1)
+    spvserver_module = importlib.import_module(spvserver_path)
+    return getattr(spvserver_module, regtest_class_name)
+
+
+def get_blockchain_node_from_ledger(ledger_module):
+    return BlockchainNode(
+        ledger_module.__node_url__,
+        os.path.join(ledger_module.__node_bin__, ledger_module.__node_daemon__),
os.path.join(ledger_module.__node_bin__, ledger_module.__node_cli__) + ) + + +def set_logging(ledger_module, level): + logging.getLogger('torba').setLevel(level) + logging.getLogger('torba.server').setLevel(level) + #logging.getLogger('asyncio').setLevel(level) + logging.getLogger('blockchain').setLevel(level) + logging.getLogger(ledger_module.__name__).setLevel(level) + + +class Conductor: + + def __init__(self, ledger_module=None, manager_module=None, verbosity=logging.WARNING): + self.ledger_module = ledger_module or get_ledger_from_environment() + self.manager_module = manager_module or get_manager_from_environment() + self.spv_module = get_spvserver_from_ledger(self.ledger_module) + + self.blockchain_node = get_blockchain_node_from_ledger(self.ledger_module) + self.spv_node = SPVNode(self.spv_module) + self.wallet_node = WalletNode(self.manager_module, self.ledger_module.RegTestLedger) + + set_logging(self.ledger_module, verbosity) + + self.blockchain_started = False + self.spv_started = False + self.wallet_started = False + + async def start_blockchain(self): + await self.blockchain_node.start() + await self.blockchain_node.generate(200) + self.blockchain_started = True + + async def start_spv(self): + await self.spv_node.start() + self.spv_started = True + + async def start_wallet(self): + await self.wallet_node.start() + self.wallet_started = True + + async def start(self): + self.blockchain_started or await self.start_blockchain() + self.spv_started or await self.start_spv() + self.wallet_started or await self.start_wallet() + + async def stop(self): + if self.wallet_started: + try: + await self.wallet_node.stop(cleanup=True) + except Exception as e: + print(e) + if self.spv_started: + try: + await self.spv_node.stop(cleanup=True) + except Exception as e: + print(e) + if self.blockchain_started: + try: + await self.blockchain_node.stop(cleanup=True) + except Exception as e: + print(e) + + +class WalletNode: + + def __init__(self, manager_class: Type[BaseWalletManager], ledger_class: Type[BaseLedger], + verbose: bool = False) -> None: + self.manager_class = manager_class + self.ledger_class = ledger_class + self.verbose = verbose + self.manager: BaseWalletManager = None + self.ledger: BaseLedger = None + self.wallet: Wallet = None + self.account: BaseAccount = None + self.data_path: str = None + + async def start(self): + self.data_path = tempfile.mkdtemp() + wallet_file_name = os.path.join(self.data_path, 'my_wallet.json') + with open(wallet_file_name, 'w') as wf: + wf.write('{"version": 1, "accounts": []}\n') + self.manager = self.manager_class.from_config({ + 'ledgers': { + self.ledger_class.get_id(): { + 'default_servers': [('localhost', 1984)], + 'data_path': self.data_path + } + }, + 'wallets': [wallet_file_name] + }) + self.ledger = self.manager.ledgers[self.ledger_class] + self.wallet = self.manager.default_wallet + self.wallet.generate_account(self.ledger) + self.account = self.wallet.default_account + await self.manager.start() + + async def stop(self, cleanup=True): + try: + await self.manager.stop() + finally: + cleanup and self.cleanup() + + def cleanup(self): + shutil.rmtree(self.data_path, ignore_errors=True) + + +class SPVNode: + + def __init__(self, coin_class): + self.coin_class = coin_class + self.controller = None + self.data_path = None + + async def start(self): + self.data_path = tempfile.mkdtemp() + conf = { + 'DB_DIRECTORY': self.data_path, + 'DAEMON_URL': 'http://rpcuser:rpcpassword@localhost:50001/', + 'REORG_LIMIT': '100', + 'TCP_PORT': '1984' + } + 
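+        # Env (torba.server.env) pulls its settings from environment
+        # variables, so the conf dict above takes effect through the
+        # os.environ.update() call that follows.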
os.environ.update(conf) + self.server = Server(Env(self.coin_class)) + await self.server.start() + + async def stop(self, cleanup=True): + try: + await self.controller.shutdown() + finally: + cleanup and self.cleanup() + + def cleanup(self): + shutil.rmtree(self.data_path, ignore_errors=True) + + +class BlockchainProcess(asyncio.SubprocessProtocol): + + IGNORE_OUTPUT = [ + b'keypool keep', + b'keypool reserve', + b'keypool return', + ] + + def __init__(self, log): + self.ready = asyncio.Event() + self.stopped = asyncio.Event() + self.log = log + + def pipe_data_received(self, fd, data): + if self.log and not any(ignore in data for ignore in self.IGNORE_OUTPUT): + if b'Error:' in data: + self.log.error(data.decode('ascii')) + else: + self.log.info(data.decode('ascii')) + if b'Error:' in data: + self.ready.set() + raise SystemError(data.decode('ascii')) + elif b'Done loading' in data: + self.ready.set() + elif b'Shutdown: done' in data: + self.stopped.set() + + def process_exited(self): + self.stopped.set() + + +class BlockchainNode: + + def __init__(self, url, daemon, cli): + self.latest_release_url = url + self.project_dir = os.path.dirname(os.path.dirname(__file__)) + self.bin_dir = os.path.join(self.project_dir, 'bin') + self.daemon_bin = os.path.join(self.bin_dir, daemon) + self.cli_bin = os.path.join(self.bin_dir, cli) + self.log = logging.getLogger('blockchain') + self.data_path = None + self.protocol = None + self.transport = None + self._block_expected = 0 + + def is_expected_block(self, e: BlockHeightEvent): + return self._block_expected == e.height + + @property + def exists(self): + return ( + os.path.exists(self.cli_bin) and + os.path.exists(self.daemon_bin) + ) + + def download(self): + downloaded_file = os.path.join( + self.bin_dir, + self.latest_release_url[self.latest_release_url.rfind('/')+1:] + ) + + if not os.path.exists(self.bin_dir): + os.mkdir(self.bin_dir) + + if not os.path.exists(downloaded_file): + self.log.info('Downloading: %s', self.latest_release_url) + r = requests.get(self.latest_release_url, stream=True) + with open(downloaded_file, 'wb') as f: + shutil.copyfileobj(r.raw, f) + + self.log.info('Extracting: %s', downloaded_file) + + if downloaded_file.endswith('.zip'): + with zipfile.ZipFile(downloaded_file) as zf: + zf.extractall(self.bin_dir) + # zipfile bug https://bugs.python.org/issue15795 + os.chmod(self.cli_bin, 0o755) + os.chmod(self.daemon_bin, 0o755) + + elif downloaded_file.endswith('.tar.gz'): + with tarfile.open(downloaded_file) as tar: + tar.extractall(self.bin_dir) + + return self.exists + + def ensure(self): + return self.exists or self.download() + + async def start(self): + assert self.ensure() + self.data_path = tempfile.mkdtemp() + loop = asyncio.get_event_loop() + asyncio.get_child_watcher().attach_loop(loop) + command = ( + self.daemon_bin, + '-datadir={}'.format(self.data_path), + '-printtoconsole', '-regtest', '-server', '-txindex', + '-rpcuser=rpcuser', '-rpcpassword=rpcpassword', '-rpcport=50001' + ) + self.log.info(' '.join(command)) + self.transport, self.protocol = await loop.subprocess_exec( + lambda: BlockchainProcess(self.log), *command + ) + await self.protocol.ready.wait() + + async def stop(self, cleanup=True): + try: + self.transport.terminate() + await self.protocol.stopped.wait() + finally: + if cleanup: + self.cleanup() + + def cleanup(self): + shutil.rmtree(self.data_path, ignore_errors=True) + + async def _cli_cmnd(self, *args): + cmnd_args = [ + self.cli_bin, '-datadir={}'.format(self.data_path), '-regtest', + 
'-rpcuser=rpcuser', '-rpcpassword=rpcpassword', '-rpcport=50001' + ] + list(args) + self.log.info(' '.join(cmnd_args)) + loop = asyncio.get_event_loop() + asyncio.get_child_watcher().attach_loop(loop) + process = await asyncio.create_subprocess_exec( + *cmnd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT + ) + out, err = await process.communicate() + self.log.info(out.decode().strip()) + return out.decode().strip() + + def generate(self, blocks): + self._block_expected += blocks + return self._cli_cmnd('generate', str(blocks)) + + def invalidateblock(self, hash): + return self._cli_cmnd('invalidateblock', hash) + + def get_raw_change_address(self): + return self._cli_cmnd('getrawchangeaddress') + + async def get_balance(self): + return float(await self._cli_cmnd('getbalance')) + + def send_to_address(self, address, credits): + return self._cli_cmnd('sendtoaddress', address, str(credits)) + + def send_raw_transaction(self, tx): + return self._cli_cmnd('sendrawtransaction', tx.decode()) + + def decode_raw_transaction(self, tx): + return self._cli_cmnd('decoderawtransaction', hexlify(tx.raw).decode()) + + def get_raw_transaction(self, txid): + return self._cli_cmnd('getrawtransaction', txid, '1') diff --git a/torba/testing/service.py b/torba/testing/service.py new file mode 100644 index 000000000..29806cd65 --- /dev/null +++ b/torba/testing/service.py @@ -0,0 +1,147 @@ +import asyncio +import logging +from aiohttp.web import Application, WebSocketResponse, json_response +from aiohttp.http_websocket import WSMsgType, WSCloseCode +from .node import Conductor + + +PORT = 7954 + + +class WebSocketLogHandler(logging.Handler): + + def __init__(self, send_message): + super().__init__() + self.send_message = send_message + + def emit(self, record): + try: + self.send_message({ + 'type': 'log', + 'name': record.name, + 'message': self.format(record) + }) + except Exception: + self.handleError(record) + + +class TestingServiceAPI: + + def __init__(self, stack: Conductor, loop: asyncio.AbstractEventLoop): + self.stack = stack + self.loop = loop + self.app = Application() + self.app.router.add_post('/start', self.start_stack) + self.app.router.add_post('/generate', self.generate) + self.app.router.add_post('/transfer', self.transfer) + self.app.router.add_post('/balance', self.balance) + self.app.router.add_get('/log', self.log) + self.app['websockets'] = set() + self.app.on_shutdown.append(self.on_shutdown) + self.handler = self.app.make_handler() + self.server = None + + async def start(self): + self.server = await self.loop.create_server( + self.handler, '0.0.0.0', PORT + ) + print('serving on', self.server.sockets[0].getsockname()) + + async def stop(self): + await self.stack.stop() + self.server.close() + await self.server.wait_closed() + await self.app.shutdown() + await self.handler.shutdown(60.0) + await self.app.cleanup() + + async def start_stack(self, _): + handler = WebSocketLogHandler(self.send_message) + logging.getLogger('blockchain').setLevel(logging.DEBUG) + logging.getLogger('blockchain').addHandler(handler) + logging.getLogger('electrumx').setLevel(logging.DEBUG) + logging.getLogger('electrumx').addHandler(handler) + logging.getLogger('Controller').setLevel(logging.DEBUG) + logging.getLogger('Controller').addHandler(handler) + logging.getLogger('LBRYBlockProcessor').setLevel(logging.DEBUG) + logging.getLogger('LBRYBlockProcessor').addHandler(handler) + logging.getLogger('LBCDaemon').setLevel(logging.DEBUG) + logging.getLogger('LBCDaemon').addHandler(handler) + 
logging.getLogger('torba').setLevel(logging.DEBUG) + logging.getLogger('torba').addHandler(handler) + logging.getLogger(self.stack.ledger_module.__name__).setLevel(logging.DEBUG) + logging.getLogger(self.stack.ledger_module.__name__).addHandler(handler) + logging.getLogger(self.stack.ledger_module.__electrumx__.split('.')[0]).setLevel(logging.DEBUG) + logging.getLogger(self.stack.ledger_module.__electrumx__.split('.')[0]).addHandler(handler) + #await self.stack.start() + self.stack.blockchain_started or await self.stack.start_blockchain() + self.send_message({'type': 'service', 'name': 'blockchain'}) + self.stack.spv_started or await self.stack.start_spv() + self.send_message({'type': 'service', 'name': 'spv'}) + self.stack.wallet_started or await self.stack.start_wallet() + self.send_message({'type': 'service', 'name': 'wallet'}) + self.stack.wallet_node.ledger.on_header.listen(self.on_status) + self.stack.wallet_node.ledger.on_transaction.listen(self.on_status) + return json_response({'started': True}) + + async def generate(self, request): + data = await request.post() + blocks = data.get('blocks', 1) + await self.stack.blockchain_node.generate(int(blocks)) + return json_response({'blocks': blocks}) + + async def transfer(self, request): + data = await request.post() + address = data.get('address') + if not address: + address = await self.stack.wallet_node.account.receiving.get_or_create_usable_address() + amount = data.get('amount', 1) + txid = await self.stack.blockchain_node.send_to_address(address, amount) + await self.stack.wallet_node.ledger.on_transaction.where( + lambda e: e.tx.id == txid and e.address == address + ) + return json_response({ + 'address': address, + 'amount': amount, + 'txid': txid + }) + + async def balance(self, _): + return json_response({ + 'balance': await self.stack.blockchain_node.get_balance() + }) + + async def log(self, request): + ws = WebSocketResponse() + await ws.prepare(request) + self.app['websockets'].add(ws) + try: + async for msg in ws: + if msg.type == WSMsgType.TEXT: + if msg.data == 'close': + await ws.close() + elif msg.type == WSMsgType.ERROR: + print('ws connection closed with exception %s' % + ws.exception()) + finally: + self.app['websockets'].remove(ws) + return ws + + @staticmethod + async def on_shutdown(app): + for ws in app['websockets']: + await ws.close(code=WSCloseCode.GOING_AWAY, message='Server shutdown') + + async def on_status(self, _): + if not self.app['websockets']: + return + self.send_message({ + 'type': 'status', + 'height': self.stack.wallet_node.ledger.headers.height, + 'balance': await self.stack.wallet_node.account.get_balance(), + 'miner': await self.stack.blockchain_node.get_balance() + }) + + def send_message(self, msg): + for ws in self.app['websockets']: + asyncio.ensure_future(ws.send_json(msg)) diff --git a/torba/testing/testcase.py b/torba/testing/testcase.py new file mode 100644 index 000000000..00ac9b112 --- /dev/null +++ b/torba/testing/testcase.py @@ -0,0 +1,176 @@ +import asyncio +import unittest +import logging +from unittest.case import _Outcome +from .node import Conductor + + +try: + from asyncio.runners import _cancel_all_tasks +except ImportError: + # this is only available in py3.7 + def _cancel_all_tasks(loop): + pass + + +class AsyncioTestCase(unittest.TestCase): + # Implementation inspired by discussion: + # https://bugs.python.org/issue32972 + + async def asyncSetUp(self): + pass + + async def asyncTearDown(self): + pass + + async def doAsyncCleanups(self): + pass + + def run(self, 
result=None): + orig_result = result + if result is None: + result = self.defaultTestResult() + startTestRun = getattr(result, 'startTestRun', None) + if startTestRun is not None: + startTestRun() + + result.startTest(self) + + testMethod = getattr(self, self._testMethodName) + if (getattr(self.__class__, "__unittest_skip__", False) or + getattr(testMethod, "__unittest_skip__", False)): + # If the class or method was skipped. + try: + skip_why = (getattr(self.__class__, '__unittest_skip_why__', '') + or getattr(testMethod, '__unittest_skip_why__', '')) + self._addSkip(result, self, skip_why) + finally: + result.stopTest(self) + return + expecting_failure_method = getattr(testMethod, + "__unittest_expecting_failure__", False) + expecting_failure_class = getattr(self, + "__unittest_expecting_failure__", False) + expecting_failure = expecting_failure_class or expecting_failure_method + outcome = _Outcome(result) + try: + self._outcome = outcome + + loop = asyncio.new_event_loop() + try: + asyncio.set_event_loop(loop) + loop.set_debug(True) + + with outcome.testPartExecutor(self): + self.setUp() + loop.run_until_complete(self.asyncSetUp()) + if outcome.success: + outcome.expecting_failure = expecting_failure + with outcome.testPartExecutor(self, isTest=True): + possible_coroutine = testMethod() + if asyncio.iscoroutine(possible_coroutine): + loop.run_until_complete(possible_coroutine) + outcome.expecting_failure = False + with outcome.testPartExecutor(self): + loop.run_until_complete(self.asyncTearDown()) + self.tearDown() + finally: + try: + _cancel_all_tasks(loop) + loop.run_until_complete(loop.shutdown_asyncgens()) + finally: + asyncio.set_event_loop(None) + loop.close() + + self.doCleanups() + + for test, reason in outcome.skipped: + self._addSkip(result, test, reason) + self._feedErrorsToResult(result, outcome.errors) + if outcome.success: + if expecting_failure: + if outcome.expectedFailure: + self._addExpectedFailure(result, outcome.expectedFailure) + else: + self._addUnexpectedSuccess(result) + else: + result.addSuccess(self) + return result + finally: + result.stopTest(self) + if orig_result is None: + stopTestRun = getattr(result, 'stopTestRun', None) + if stopTestRun is not None: + stopTestRun() + + # explicitly break reference cycles: + # outcome.errors -> frame -> outcome -> outcome.errors + # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure + outcome.errors.clear() + outcome.expectedFailure = None + + # clear the outcome, no more needed + self._outcome = None + + +class IntegrationTestCase(AsyncioTestCase): + + LEDGER = None + MANAGER = None + VERBOSITY = logging.WARNING + + async def asyncSetUp(self): + self.conductor = Conductor( + ledger_module=self.LEDGER, manager_module=self.MANAGER, verbosity=self.VERBOSITY + ) + await self.conductor.start() + self.blockchain = self.conductor.blockchain_node + self.manager = self.conductor.wallet_node.manager + self.ledger = self.conductor.wallet_node.ledger + self.wallet = self.conductor.wallet_node.wallet + self.account = self.conductor.wallet_node.wallet.default_account + + async def asyncTearDown(self): + await self.conductor.stop() + + def broadcast(self, tx): + return self.ledger.broadcast(tx) + + def get_balance(self, account=None, confirmations=0): + if account is None: + return self.manager.get_balance(confirmations=confirmations) + else: + return account.get_balance(confirmations=confirmations) + + async def on_header(self, height): + if self.ledger.headers.height < height: + await 
self.ledger.on_header.where( + lambda e: e.height == height + ) + return True + + def on_transaction_id(self, txid): + return self.ledger.on_transaction.where( + lambda e: e.tx.id == txid + ) + + def on_transaction_address(self, tx, address): + return self.ledger.on_transaction.where( + lambda e: e.tx.id == tx.id and e.address == address + ) + + async def on_transaction(self, tx): + addresses = await self.get_tx_addresses(tx, self.ledger) + await asyncio.wait([ + self.ledger.on_transaction.where(lambda e: e.address == address) + for address in addresses + ]) + + async def get_tx_addresses(self, tx, ledger): + addresses = set() + for txo in tx.outputs: + address = ledger.hash160_to_address(txo.script.values['pubkey_hash']) + record = await ledger.db.get_address(address=address) + if record is not None: + addresses.add(address) + return list(addresses) diff --git a/tox.ini b/tox.ini index f7658651e..bda7daf51 100644 --- a/tox.ini +++ b/tox.ini @@ -8,17 +8,14 @@ TESTTYPE = integration: integration [testenv] -deps = - coverage - ../orchstr8 - ../electrumx -extras = test +deps = coverage +extras = test,server changedir = {toxinidir}/tests setenv = - integration: LEDGER={envname} + integration: TORBA_LEDGER={envname} commands = unit: coverage run -p --source={envsitepackagesdir}/torba -m unittest discover -t . unit - integration: orchstr8 download + integration: torba download integration: coverage run -p --source={envsitepackagesdir}/torba -m unittest integration.test_transactions integration: coverage run -p --source={envsitepackagesdir}/torba -m unittest integration.test_reconnect # Too slow on Travis
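
The TestingServiceAPI introduced in torba/testing/service.py is a plain HTTP front end for the Conductor stack, so it can be driven from any client. A minimal sketch, assuming the service is already listening on its default port 7954 on localhost and that the requests package is available:

    import requests

    BASE = 'http://localhost:7954'

    # boot the blockchain, spv and wallet nodes behind the service
    requests.post(BASE + '/start')
    # mine some blocks; the handler reads the 'blocks' form field
    requests.post(BASE + '/generate', data={'blocks': 110})
    # send coins; with no 'address' field the wallet node supplies a fresh address
    tx = requests.post(BASE + '/transfer', data={'amount': 2.5}).json()
    print(tx['txid'], tx['address'])
    # miner balance as reported by the blockchain node
    print(requests.post(BASE + '/balance').json()['balance'])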
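
Log records, service start notifications and status updates are pushed to clients connected to the /log websocket (see WebSocketLogHandler and send_message above). A small listener sketch using aiohttp's client side, the same library service.py already imports on the server side:

    import asyncio
    import aiohttp

    async def tail_logs():
        async with aiohttp.ClientSession() as session:
            async with session.ws_connect('http://localhost:7954/log') as ws:
                async for msg in ws:
                    if msg.type == aiohttp.WSMsgType.TEXT:
                        # messages look like {'type': 'log'|'service'|'status', ...}
                        print(msg.json())

    asyncio.get_event_loop().run_until_complete(tail_logs())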
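
In torba/testing/testcase.py the _cancel_all_tasks fallback for Python 3.6 is a no-op, so any task still pending when a test finishes is simply abandoned before the loop is closed. If that ever becomes a problem, a rough back-port of the 3.7 helper might look like the following (a sketch only, not part of this patch):

    import asyncio

    def _cancel_all_tasks(loop):
        # cancel everything still scheduled on the loop and let the tasks
        # observe the CancelledError before the loop is closed
        to_cancel = [t for t in asyncio.Task.all_tasks(loop) if not t.done()]
        if not to_cancel:
            return
        for task in to_cancel:
            task.cancel()
        loop.run_until_complete(
            asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))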
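
IntegrationTestCase ties the pieces together: asyncSetUp boots a Conductor and exposes the blockchain node, ledger, manager, wallet and default account, while the on_* helpers wrap the ledger's event streams. A hypothetical downstream test, following the same send-then-wait pattern the /transfer handler uses (the example_ledger module and the final assertion are placeholders, not part of torba):

    import example_ledger  # hypothetical module supplying the ledger classes

    from torba.testing.testcase import IntegrationTestCase

    class BasicTransactionTest(IntegrationTestCase):

        LEDGER = example_ledger

        async def test_receive_funds(self):
            address = await self.account.receiving.get_or_create_usable_address()
            txid = await self.blockchain.send_to_address(address, 1.0)
            await self.on_transaction_id(txid)   # wallet has seen the mempool tx
            await self.blockchain.generate(1)    # confirm it
            self.assertGreater(await self.get_balance(self.account), 0)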