// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
# include "blockencodings.h"
# include "consensus/consensus.h"
# include "consensus/validation.h"
# include "chainparams.h"
# include "hash.h"
# include "random.h"
# include "streams.h"
# include "txmempool.h"
2016-12-02 01:06:41 +01:00
# include "validation.h"
2016-06-09 00:43:50 +02:00
# include "util.h"
2016-04-15 21:23:57 +02:00
# include <unordered_map>
2016-01-03 18:54:50 +01:00
# define MIN_TRANSACTION_BASE_SIZE (::GetSerializeSize(CTransaction(), SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS))
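
// Build the BIP 152 compact-block representation of a block: the full header,
// a fresh random nonce keying the short-ID function, a 6-byte short ID for each
// non-coinbase transaction, and the coinbase as the only prefilled transaction.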
CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, bool fUseWTXID) :
        nonce(GetRand(std::numeric_limits<uint64_t>::max())),
        shorttxids(block.vtx.size() - 1), prefilledtxn(1), header(block) {
    FillShortTxIDSelector();
    //TODO: Use our mempool prior to block acceptance to predictively fill more than just the coinbase
    prefilledtxn[0] = {0, block.vtx[0]};
    for (size_t i = 1; i < block.vtx.size(); i++) {
        const CTransaction& tx = *block.vtx[i];
        shorttxids[i - 1] = GetShortID(fUseWTXID ? tx.GetWitnessHash() : tx.GetHash());
    }
}
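
// Derive the two SipHash keys for this block's short IDs from
// SHA256(serialized header || nonce), per BIP 152, so each block/nonce
// pair selects an independent short-ID function.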
void CBlockHeaderAndShortTxIDs::FillShortTxIDSelector() const {
    CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
    stream << header << nonce;
    CSHA256 hasher;
    hasher.Write((unsigned char*)&(*stream.begin()), stream.end() - stream.begin());
    uint256 shorttxidhash;
    hasher.Finalize(shorttxidhash.begin());
    shorttxidk0 = shorttxidhash.GetUint64(0);
    shorttxidk1 = shorttxidhash.GetUint64(1);
}
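
// A transaction's short ID is the SipHash-2-4 of its (w)txid under the
// per-block keys, truncated to its 6 least-significant bytes.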
uint64_t CBlockHeaderAndShortTxIDs::GetShortID(const uint256& txhash) const {
    static_assert(SHORTTXIDS_LENGTH == 6, "shorttxids calculation assumes 6-byte shorttxids");
    return SipHashUint256(shorttxidk0, shorttxidk1, txhash) & 0xffffffffffffL;
}

ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector<std::pair<uint256, CTransactionRef>>& extra_txn) {
    if (cmpctblock.header.IsNull() || (cmpctblock.shorttxids.empty() && cmpctblock.prefilledtxn.empty()))
        return READ_STATUS_INVALID;
    if (cmpctblock.shorttxids.size() + cmpctblock.prefilledtxn.size() > MAX_BLOCK_BASE_SIZE / MIN_TRANSACTION_BASE_SIZE)
        return READ_STATUS_INVALID;

    assert(header.IsNull() && txn_available.empty());
    header = cmpctblock.header;
    txn_available.resize(cmpctblock.BlockTxCount());
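
    // Prefilled entries use differential indexes: each index field is the gap
    // since the previous prefilled transaction, so accumulate them to recover
    // the absolute position within the block.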
    int32_t lastprefilledindex = -1;
    for (size_t i = 0; i < cmpctblock.prefilledtxn.size(); i++) {
        if (cmpctblock.prefilledtxn[i].tx->IsNull())
            return READ_STATUS_INVALID;

        lastprefilledindex += cmpctblock.prefilledtxn[i].index + 1; //index is a uint16_t, so can't overflow here
        if (lastprefilledindex > std::numeric_limits<uint16_t>::max())
            return READ_STATUS_INVALID;
        if ((uint32_t)lastprefilledindex > cmpctblock.shorttxids.size() + i) {
            // If we are inserting a tx at an index greater than our full list of shorttxids
            // plus the number of prefilled txn we've inserted, then we have txn for which we
            // have neither a prefilled txn nor a shorttxid!
            return READ_STATUS_INVALID;
        }
        txn_available[lastprefilledindex] = cmpctblock.prefilledtxn[i].tx;
    }
    prefilled_count = cmpctblock.prefilledtxn.size();

    // Calculate map of txids -> positions and check mempool to see what we have (or don't).
    // Because well-formed cmpctblock messages will have a (relatively) uniform distribution
    // of short IDs, any highly-uneven distribution of elements can be safely treated as a
    // READ_STATUS_FAILED.
    std::unordered_map<uint64_t, uint16_t> shorttxids(cmpctblock.shorttxids.size());
    uint16_t index_offset = 0;
    for (size_t i = 0; i < cmpctblock.shorttxids.size(); i++) {
        while (txn_available[i + index_offset])
            index_offset++;
        shorttxids[cmpctblock.shorttxids[i]] = i + index_offset;
        // To determine the chance that the number of entries in a bucket exceeds N,
        // we use the fact that the number of elements in a single bucket is
        // binomially distributed (with n = the number of shorttxids S, and p =
        // 1 / the number of buckets), that in the worst case the number of buckets is
        // equal to S (due to std::unordered_map having a default load factor of 1.0),
        // and that the chance for any bucket to exceed N elements is at most
        // buckets * (the chance that any given bucket is above N elements).
        // Thus: P(max_elements_per_bucket > N) <= S * (1 - cdf(binomial(n=S,p=1/S), N)).
        // If we assume blocks of up to 16000 transactions, allowing 12 elements per bucket
        // should only fail once per ~1 million block transfers (per peer and connection).
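        // Concretely, with S = 16000 a bucket's load is approximately Poisson(1),
        // so the bound is roughly 16000 * P(Poisson(1) >= 13), on the order of 1e-6.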
        if (shorttxids.bucket_size(shorttxids.bucket(cmpctblock.shorttxids[i])) > 12)
            return READ_STATUS_FAILED;
    }

    // TODO: in the shortid-collision case, we should instead request both transactions
    // which collided. Falling back to full-block-request here is overkill.
    if (shorttxids.size() != cmpctblock.shorttxids.size())
        return READ_STATUS_FAILED; // Short ID collision
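
    // Scan the mempool's cached transaction-hash list, computing this block's
    // short ID for each entry and claiming any match for a still-missing slot.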
    std::vector<bool> have_txn(txn_available.size());
    {
        LOCK(pool->cs);
        const std::vector<std::pair<uint256, CTxMemPool::txiter>>& vTxHashes = pool->vTxHashes;
        for (size_t i = 0; i < vTxHashes.size(); i++) {
            uint64_t shortid = cmpctblock.GetShortID(vTxHashes[i].first);
            std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
            if (idit != shorttxids.end()) {
                if (!have_txn[idit->second]) {
                    txn_available[idit->second] = vTxHashes[i].second->GetSharedTx();
                    have_txn[idit->second] = true;
                    mempool_count++;
                } else {
                    // If we find two mempool txn that match the short id, just request it.
                    // This should be rare enough that the extra bandwidth doesn't matter,
                    // but eating a round-trip due to FillBlock failure would be annoying.
                    if (txn_available[idit->second]) {
                        txn_available[idit->second].reset();
                        mempool_count--;
                    }
                }
            }
            // Though ideally we'd continue scanning for the two-txn-match-shortid case,
            // the performance win of an early exit here is too good to pass up and worth
            // the extra risk.
            if (mempool_count == shorttxids.size())
                break;
        }
    }
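
    // Second pass over extra_txn: transactions the caller still holds outside the
    // mempool, matched by short ID in the same way as the mempool scan above.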
    for (size_t i = 0; i < extra_txn.size(); i++) {
        uint64_t shortid = cmpctblock.GetShortID(extra_txn[i].first);
        std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
        if (idit != shorttxids.end()) {
            if (!have_txn[idit->second]) {
                txn_available[idit->second] = extra_txn[i].second;
                have_txn[idit->second] = true;
                mempool_count++;
                extra_count++;
            } else {
                // If we find two mempool/extra txn that match the short id, just
                // request it.
                // This should be rare enough that the extra bandwidth doesn't matter,
                // but eating a round-trip due to FillBlock failure would be annoying.
                // Note that we don't want duplication between extra_txn and mempool to
                // trigger this case, so we compare witness hashes first.
                if (txn_available[idit->second] &&
                        txn_available[idit->second]->GetWitnessHash() != extra_txn[i].second->GetWitnessHash()) {
                    txn_available[idit->second].reset();
                    mempool_count--;
                    extra_count--;
                }
            }
        }
        // Though ideally we'd continue scanning for the two-txn-match-shortid case,
        // the performance win of an early exit here is too good to pass up and worth
        // the extra risk.
        if (mempool_count == shorttxids.size())
            break;
    }

    LogPrint("cmpctblock", "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of size %lu\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock, SER_NETWORK, PROTOCOL_VERSION));

    return READ_STATUS_OK;
}

bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const {
    assert(!header.IsNull());
    assert(index < txn_available.size());
    return txn_available[index] ? true : false;
}
ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing) {
    assert(!header.IsNull());
    uint256 hash = header.GetHash();
    block = header;
    block.vtx.resize(txn_available.size());

    size_t tx_missing_offset = 0;
    for (size_t i = 0; i < txn_available.size(); i++) {
        if (!txn_available[i]) {
            if (vtx_missing.size() <= tx_missing_offset)
                return READ_STATUS_INVALID;
            block.vtx[i] = vtx_missing[tx_missing_offset++];
        } else
            block.vtx[i] = std::move(txn_available[i]);
    }

    // Make sure we can't call FillBlock again.
    header.SetNull();
    txn_available.clear();

    if (vtx_missing.size() != tx_missing_offset)
        return READ_STATUS_INVALID;

    CValidationState state;
    if (!CheckBlock(block, state, Params().GetConsensus())) {
        // TODO: We really want to just check the merkle tree manually here,
        // but that is expensive, and CheckBlock caches a block's
        // "checked-status" (in the CBlock?). CBlock should be able to
        // check its own merkle root and cache that check.
        if (state.CorruptionPossible())
            return READ_STATUS_FAILED; // Possible Short ID collision
        return READ_STATUS_CHECKBLOCK_FAILED;
    }

    LogPrint("cmpctblock", "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size());
    if (vtx_missing.size() < 5) {
        for (const auto& tx : vtx_missing)
            LogPrint("cmpctblock", "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString());
    }

    return READ_STATUS_OK;
}