Merge pull request #6498

a8d0407 Move recentRejects initialization to top of InitBlockIndex (Wladimir J. van der Laan)
0847d9c Keep track of recently rejected transactions (Peter Todd)
d741371 Only use randomly created nonces in CRollingBloomFilter. (Pieter Wuille)
d2d7ee0 Make CRollingBloomFilter set nTweak for you (Peter Todd)
a3d65fe Reuse vector hashing code for uint256 (Pieter Wuille)
bbe4108 Add uint256 support to CRollingBloomFilter (Peter Todd)

Merge commit 219b916545
5 changed files with 109 additions and 23 deletions
src/bloom.cpp

@@ -8,6 +8,7 @@
 #include "hash.h"
 #include "script/script.h"
 #include "script/standard.h"
+#include "random.h"
 #include "streams.h"
 
 #include <math.h>
@@ -121,6 +122,12 @@ void CBloomFilter::clear()
     isEmpty = true;
 }
 
+void CBloomFilter::reset(unsigned int nNewTweak)
+{
+    clear();
+    nTweak = nNewTweak;
+}
+
 bool CBloomFilter::IsWithinSizeConstraints() const
 {
     return vData.size() <= MAX_BLOOM_FILTER_SIZE && nHashFuncs <= MAX_HASH_FUNCS;
@@ -209,15 +216,17 @@ void CBloomFilter::UpdateEmptyFull()
     isEmpty = empty;
 }
 
-CRollingBloomFilter::CRollingBloomFilter(unsigned int nElements, double fpRate, unsigned int nTweak) :
-    b1(nElements * 2, fpRate, nTweak), b2(nElements * 2, fpRate, nTweak)
+CRollingBloomFilter::CRollingBloomFilter(unsigned int nElements, double fpRate) :
+    b1(nElements * 2, fpRate, 0), b2(nElements * 2, fpRate, 0)
 {
     // Implemented using two bloom filters of 2 * nElements each.
     // We fill them up, and clear them, staggered, every nElements
     // inserted, so at least one always contains the last nElements
     // inserted.
-    nBloomSize = nElements * 2;
     nInsertions = 0;
+    nBloomSize = nElements * 2;
+
+    reset();
 }
 
 void CRollingBloomFilter::insert(const std::vector<unsigned char>& vKey)
@@ -234,6 +243,12 @@ void CRollingBloomFilter::insert(const std::vector<unsigned char>& vKey)
     }
 }
 
+void CRollingBloomFilter::insert(const uint256& hash)
+{
+    vector<unsigned char> data(hash.begin(), hash.end());
+    insert(data);
+}
+
 bool CRollingBloomFilter::contains(const std::vector<unsigned char>& vKey) const
 {
     if (nInsertions < nBloomSize / 2) {
@@ -242,9 +257,16 @@ bool CRollingBloomFilter::contains(const std::vector<unsigned char>& vKey) const
     return b1.contains(vKey);
 }
 
-void CRollingBloomFilter::clear()
+bool CRollingBloomFilter::contains(const uint256& hash) const
 {
-    b1.clear();
-    b2.clear();
+    vector<unsigned char> data(hash.begin(), hash.end());
+    return contains(data);
+}
+
+void CRollingBloomFilter::reset()
+{
+    unsigned int nNewTweak = GetRand(std::numeric_limits<unsigned int>::max());
+    b1.reset(nNewTweak);
+    b2.reset(nNewTweak);
     nInsertions = 0;
 }
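Editor's note on the scheme above: the constructor and reset() lean on the staggered two-filter arrangement described in the comments, where each underlying filter is sized for 2 * nElements and the two are cleared alternately so that at least one always holds the last nElements insertions. Below is a minimal stand-alone sketch of that rolling-window idea only, using std::set in place of the two CBloomFilters; the class name RollingSetSketch and its members are invented for illustration and are not part of this change.

    #include <cassert>
    #include <set>
    #include <string>

    // Illustrative stand-in only: two sets play the role of b1/b2. Each set
    // grows to roughly 2*N entries; every N insertions the older set is
    // cleared, so at least one set still holds the last N items inserted.
    class RollingSetSketch {
    public:
        explicit RollingSetSketch(unsigned int nElements)
            : nWindow(nElements), nInsertions(0) {}

        void insert(const std::string& key) {
            if (nInsertions % nWindow == 0) {
                // Clear the set that has been filling the longest.
                std::set<std::string>& older = ((nInsertions / nWindow) % 2) ? s1 : s2;
                older.clear();
            }
            s1.insert(key);
            s2.insert(key);
            ++nInsertions;
        }

        bool contains(const std::string& key) const {
            return s1.count(key) > 0 || s2.count(key) > 0;
        }

    private:
        std::set<std::string> s1, s2;
        unsigned int nWindow;
        unsigned int nInsertions;
    };

    int main() {
        RollingSetSketch recent(2);
        recent.insert("a");
        recent.insert("b");
        recent.insert("c");
        assert(recent.contains("b") && recent.contains("c")); // last 2 still present
        return 0;
    }

The set cleared most recently always still contains the last N insertions, which is exactly the property contains() relies on.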
src/bloom.h (16 lines changed)

@@ -89,6 +89,7 @@ public:
     bool contains(const uint256& hash) const;
 
     void clear();
+    void reset(unsigned int nNewTweak);
 
     //! True if the size is <= MAX_BLOOM_FILTER_SIZE and the number of hash functions is <= MAX_HASH_FUNCS
     //! (catch a filter which was just deserialized which was too big)
@@ -103,7 +104,11 @@ public:
 
 /**
  * RollingBloomFilter is a probabilistic "keep track of most recently inserted" set.
- * Construct it with the number of items to keep track of, and a false-positive rate.
+ * Construct it with the number of items to keep track of, and a false-positive
+ * rate. Unlike CBloomFilter, by default nTweak is set to a cryptographically
+ * secure random value for you. Similarly rather than clear() the method
+ * reset() is provided, which also changes nTweak to decrease the impact of
+ * false-positives.
  *
  * contains(item) will always return true if item was one of the last N things
  * insert()'ed ... but may also return true for items that were not inserted.
@@ -111,12 +116,17 @@ public:
 class CRollingBloomFilter
 {
 public:
-    CRollingBloomFilter(unsigned int nElements, double nFPRate, unsigned int nTweak);
+    // A random bloom filter calls GetRand() at creation time.
+    // Don't create global CRollingBloomFilter objects, as they may be
+    // constructed before the randomizer is properly initialized.
+    CRollingBloomFilter(unsigned int nElements, double nFPRate);
 
     void insert(const std::vector<unsigned char>& vKey);
+    void insert(const uint256& hash);
     bool contains(const std::vector<unsigned char>& vKey) const;
+    bool contains(const uint256& hash) const;
 
-    void clear();
+    void reset();
 
 private:
     unsigned int nBloomSize;
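With this declaration, callers no longer pass an nTweak and can insert and query uint256 hashes directly; reset() replaces clear(). A brief usage sketch under those assumptions (TrackRecentHashes is a hypothetical caller written for illustration, not part of this change):

    #include "bloom.h"
    #include "uint256.h"

    void TrackRecentHashes(const uint256& txid)
    {
        // nTweak is chosen internally from a cryptographically secure RNG,
        // so only the element count and false-positive rate are supplied.
        CRollingBloomFilter filter(1000, 0.001);

        filter.insert(txid);                 // new uint256 overload
        bool seen = filter.contains(txid);   // a just-inserted item is always found

        filter.reset();                      // wipes contents and re-randomizes nTweak
        (void)seen;
    }

Per the comment above, such a filter should be constructed after the randomizer is initialized, never as a global object.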
src/main.cpp (68 lines changed)

@@ -162,6 +162,29 @@ namespace {
      */
     map<uint256, NodeId> mapBlockSource;
 
+    /**
+     * Filter for transactions that were recently rejected by
+     * AcceptToMemoryPool. These are not rerequested until the chain tip
+     * changes, at which point the entire filter is reset. Protected by
+     * cs_main.
+     *
+     * Without this filter we'd be re-requesting txs from each of our peers,
+     * increasing bandwidth consumption considerably. For instance, with 100
+     * peers, half of which relay a tx we don't accept, that might be a 50x
+     * bandwidth increase. A flooding attacker attempting to roll-over the
+     * filter using minimum-sized, 60byte, transactions might manage to send
+     * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
+     * two minute window to send invs to us.
+     *
+     * Decreasing the false positive rate is fairly cheap, so we pick one in a
+     * million to make it highly unlikely for users to have issues with this
+     * filter.
+     *
+     * Memory used: 1.7MB
+     */
+    boost::scoped_ptr<CRollingBloomFilter> recentRejects;
+    uint256 hashRecentRejectsChainTip;
+
     /** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
     struct QueuedBlock {
         uint256 hash;
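The 1.7MB figure in the comment can be sanity-checked against the standard optimal Bloom filter sizing formula, m = -n * ln(p) / (ln 2)^2 bits, assuming the rolling filter's two underlying CBloomFilters are each sized for 2 * nElements entries as described in bloom.cpp. This back-of-the-envelope check is editorial, not part of the change:

    #include <cmath>
    #include <cstdio>

    int main() {
        const double nElements = 120000;    // recentRejects capacity
        const double fpRate    = 0.000001;  // one-in-a-million false positives
        const double ln2sq     = std::log(2.0) * std::log(2.0);

        // Bytes for one underlying filter sized for 2 * nElements entries.
        const double bytesPerFilter = -(2 * nElements) * std::log(fpRate) / ln2sq / 8;

        // Two staggered filters make up the rolling filter.
        std::printf("approx. memory: %.2f MB\n", 2 * bytesPerFilter / 1e6);  // ~1.7 MB
        return 0;
    }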
@@ -3248,6 +3271,7 @@ void UnloadBlockIndex()
     setDirtyBlockIndex.clear();
     setDirtyFileInfo.clear();
     mapNodeState.clear();
+    recentRejects.reset(NULL);
 
     BOOST_FOREACH(BlockMap::value_type& entry, mapBlockIndex) {
         delete entry.second;
@@ -3268,6 +3292,10 @@ bool LoadBlockIndex()
 bool InitBlockIndex() {
     const CChainParams& chainparams = Params();
     LOCK(cs_main);
+
+    // Initialize global variables that cannot be constructed at startup.
+    recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
+
     // Check whether we're already initialized
     if (chainActive.Genesis() != NULL)
         return true;
@@ -3670,9 +3698,20 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
     {
     case MSG_TX:
         {
-            bool txInMap = false;
-            txInMap = mempool.exists(inv.hash);
-            return txInMap || mapOrphanTransactions.count(inv.hash) ||
+            assert(recentRejects);
+            if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
+            {
+                // If the chain tip has changed previously rejected transactions
+                // might be now valid, e.g. due to a nLockTime'd tx becoming valid,
+                // or a double-spend. Reset the rejects filter and give those
+                // txs a second chance.
+                hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
+                recentRejects->reset();
+            }
+
+            return recentRejects->contains(inv.hash) ||
+                   mempool.exists(inv.hash) ||
+                   mapOrphanTransactions.count(inv.hash) ||
                    pcoinsTip->HaveCoins(inv.hash);
         }
     case MSG_BLOCK:
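The MSG_TX branch above ties the reject filter to a specific chain tip: the filter is trusted only while that tip is still current and is wiped lazily the first time it is consulted after the tip moves, since a new tip can make previously rejected transactions valid again. A hedged sketch of just that pattern with stand-in types (Hash, SimpleFilter and AlreadyRejectedSketch are invented for illustration and are not the real Bitcoin Core classes):

    #include <string>
    #include <unordered_set>

    using Hash = std::string;                   // stand-in for uint256

    struct SimpleFilter {                       // stand-in for CRollingBloomFilter
        std::unordered_set<Hash> items;
        void insert(const Hash& h) { items.insert(h); }
        bool contains(const Hash& h) const { return items.count(h) != 0; }
        void reset() { items.clear(); }
    };

    static SimpleFilter recentRejectsSketch;
    static Hash rejectsValidForTip;

    bool AlreadyRejectedSketch(const Hash& txid, const Hash& currentTip)
    {
        if (currentTip != rejectsValidForTip) {
            // A new tip may make previously rejected txs valid again (e.g. an
            // nLockTime that is now satisfied), so give them a second chance.
            rejectsValidForTip = currentTip;
            recentRejectsSketch.reset();
        }
        return recentRejectsSketch.contains(txid);
    }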
@@ -4273,6 +4312,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
                     // Probably non-standard or insufficient fee/priority
                     LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString());
                     vEraseQueue.push_back(orphanHash);
+                    assert(recentRejects);
+                    recentRejects->insert(orphanHash);
                 }
                 mempool.check(pcoinsTip);
             }
@@ -4290,12 +4331,25 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
             unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
             if (nEvicted > 0)
                 LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);
-        } else if (pfrom->fWhitelisted) {
+        } else {
+            // AcceptToMemoryPool() returned false, possibly because the tx is
+            // already in the mempool; if the tx isn't in the mempool that
+            // means it was rejected and we shouldn't ask for it again.
+            if (!mempool.exists(tx.GetHash())) {
+                assert(recentRejects);
+                recentRejects->insert(tx.GetHash());
+            }
+            if (pfrom->fWhitelisted) {
                 // Always relay transactions received from whitelisted peers, even
-            // if they are already in the mempool (allowing the node to function
-            // as a gateway for nodes hidden behind it).
+                // if they were rejected from the mempool, allowing the node to
+                // function as a gateway for nodes hidden behind it.
+                //
+                // FIXME: This includes invalid transactions, which means a
+                // whitelisted peer could get us banned! We may want to change
+                // that.
                 RelayTransaction(tx);
             }
+        }
         int nDoS = 0;
         if (state.IsInvalid(nDoS))
         {
@@ -4797,7 +4851,7 @@ bool SendMessages(CNode* pto, bool fSendTrickle)
             {
                 // Periodically clear addrKnown to allow refresh broadcasts
                 if (nLastRebroadcast)
-                    pnode->addrKnown.clear();
+                    pnode->addrKnown.reset();
 
                 // Rebroadcast our address
                 AdvertizeLocal(pnode);
src/net.cpp

@@ -2060,7 +2060,7 @@ unsigned int SendBufferSize() { return 1000*GetArg("-maxsendbuffer", 1*1000); }
 
 CNode::CNode(SOCKET hSocketIn, const CAddress& addrIn, const std::string& addrNameIn, bool fInboundIn) :
     ssSend(SER_NETWORK, INIT_PROTO_VERSION),
-    addrKnown(5000, 0.001, insecure_rand()),
+    addrKnown(5000, 0.001),
     setInventoryKnown(SendBufferSize() / 1000)
 {
     nServices = 0;
src/test/bloom_tests.cpp

@@ -469,7 +469,7 @@ static std::vector<unsigned char> RandomData()
 BOOST_AUTO_TEST_CASE(rolling_bloom)
 {
     // last-100-entry, 1% false positive:
-    CRollingBloomFilter rb1(100, 0.01, 0);
+    CRollingBloomFilter rb1(100, 0.01);
 
     // Overfill:
     static const int DATASIZE=399;
@@ -500,7 +500,7 @@ BOOST_AUTO_TEST_CASE(rolling_bloom)
     BOOST_CHECK(nHits < 175);
 
     BOOST_CHECK(rb1.contains(data[DATASIZE-1]));
-    rb1.clear();
+    rb1.reset();
     BOOST_CHECK(!rb1.contains(data[DATASIZE-1]));
 
     // Now roll through data, make sure last 100 entries
@@ -527,7 +527,7 @@ BOOST_AUTO_TEST_CASE(rolling_bloom)
     BOOST_CHECK(nHits < 100);
 
     // last-1000-entry, 0.01% false positive:
-    CRollingBloomFilter rb2(1000, 0.001, 0);
+    CRollingBloomFilter rb2(1000, 0.001);
     for (int i = 0; i < DATASIZE; i++) {
         rb2.insert(data[i]);
     }