Replace relevant services logic with a function suite.
Adds HasAllDesirableServiceFlags and GetDesirableServiceFlags, which check for NODE_NETWORK|NODE_WITNESS. This changes the following:

* Removes nRelevantServices from CConnman, disconnecting it a bit more from protocol-level logic.
* Replaces our sometimes-connect-to-!WITNESS-nodes logic with simply always requiring WITNESS|NETWORK for outbound non-feeler connections (feelers still only require NETWORK).
* This has the added benefit of removing nServicesExpected from CNode - instead letting net_processing's VERSION message handling simply check HasAllDesirableServiceFlags.
* This implies we expect WITNESS nodes to remain a significant majority of nodes on the network; also, because we cannot sync properly from !WITNESS nodes, it is strange to keep spending our valuable outbound slots on them.
* So that this change does not break connections to -connect= nodes which are !WITNESS, -connect nodes are now given the "addnode" flag. This also allows outbound connections to !NODE_NETWORK nodes for -connect nodes (which was already true of addnodes).
* Has the (somewhat unintended) consequence of changing one of the eviction metrics from the same sometimes-connect-to-!WITNESS-nodes metric to requiring HasAllDesirableServiceFlags.

This should make NODE_NETWORK_LIMITED much simpler to implement.
parent 167cef8082
commit 44407100ff

6 changed files with 55 additions and 48 deletions
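Before the diff, a stand-alone sketch may help make the new policy concrete. It mirrors the helper suite this commit adds to src/protocol.h (see the hunk below); the flag values are the usual P2P service bits, but the surrounding main(), the sample peers, and names like outbound_ok/feeler_ok are purely illustrative and are not code from this commit.

// Stand-alone illustration (not part of this commit): a trimmed-down copy of
// the helper suite added to src/protocol.h below, plus a hypothetical main()
// that walks through the outbound/feeler policy described in the message above.
#include <cstdint>
#include <cstdio>

enum ServiceFlags : uint64_t {
    NODE_NONE    = 0,
    NODE_NETWORK = (1 << 0), // can serve the full block chain
    NODE_WITNESS = (1 << 3), // can serve witness (segwit) data
};

// Flags we want a peer to offer before spending a normal outbound slot on it.
// In this commit the result is a constant; the docblock in protocol.h leaves
// room for it to become peer/state dependent later.
static ServiceFlags GetDesirableServiceFlags(ServiceFlags services) {
    return ServiceFlags(NODE_NETWORK | NODE_WITNESS);
}

static inline bool HasAllDesirableServiceFlags(ServiceFlags services) {
    return !(GetDesirableServiceFlags(services) & (~services));
}

// Full nodes are the ones likely to have a useful address DB, which is all a
// feeler connection needs.
static inline bool MayHaveUsefulAddressDB(ServiceFlags services) {
    return services & NODE_NETWORK;
}

int main() {
    const ServiceFlags peers[] = {
        ServiceFlags(NODE_NETWORK | NODE_WITNESS), // modern full node
        NODE_NETWORK,                              // pre-segwit full node
        NODE_NONE,                                 // light client
    };
    for (ServiceFlags s : peers) {
        bool outbound_ok = HasAllDesirableServiceFlags(s); // non-feeler outbound
        bool feeler_ok = MayHaveUsefulAddressDB(s);        // feeler connections
        std::printf("services=%02llx outbound=%d feeler=%d\n",
                    (unsigned long long)s, outbound_ok, feeler_ok);
    }
    return 0;
}

For the NETWORK-only peer, outbound_ok is false while feeler_ok is true, which is exactly the split the reworked ThreadOpenConnections check makes between feeler and non-feeler candidates.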
src/init.cpp
@@ -815,7 +815,6 @@ void InitLogging()
 
 namespace { // Variables internal to initialization process only
 
-ServiceFlags nRelevantServices = NODE_NETWORK;
 int nMaxConnections;
 int nUserMaxConnections;
 int nFD;
@@ -1604,9 +1603,6 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
         // Note that setting NODE_WITNESS is never required: the only downside from not
         // doing so is that after activation, no upgraded nodes will fetch from you.
         nLocalServices = ServiceFlags(nLocalServices | NODE_WITNESS);
-        // Only care about others providing witness capabilities if there is a softfork
-        // defined.
-        nRelevantServices = ServiceFlags(nRelevantServices | NODE_WITNESS);
     }
 
     // ********************************************************* Step 10: import blocks
@@ -1656,7 +1652,6 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
 
     CConnman::Options connOptions;
     connOptions.nLocalServices = nLocalServices;
-    connOptions.nRelevantServices = nRelevantServices;
     connOptions.nMaxConnections = nMaxConnections;
     connOptions.nMaxOutbound = std::min(MAX_OUTBOUND_CONNECTIONS, connOptions.nMaxConnections);
     connOptions.nMaxAddnode = MAX_ADDNODE_CONNECTIONS;
src/net.cpp
@@ -444,7 +444,6 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
     uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize();
     CAddress addr_bind = GetBindAddress(hSocket);
     CNode* pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, addr_bind, pszDest ? pszDest : "", false);
-    pnode->nServicesExpected = ServiceFlags(addrConnect.nServices & nRelevantServices);
     pnode->AddRef();
 
     return pnode;
@@ -985,7 +984,7 @@ bool CConnman::AttemptToEvictConnection()
             continue;
         NodeEvictionCandidate candidate = {node->GetId(), node->nTimeConnected, node->nMinPingUsecTime,
                                            node->nLastBlockTime, node->nLastTXTime,
-                                           (node->nServices & nRelevantServices) == nRelevantServices,
+                                           HasAllDesirableServiceFlags(node->nServices),
                                            node->fRelayTxes, node->pfilter != nullptr, node->addr, node->nKeyedNetGroup};
         vEvictionCandidates.push_back(candidate);
     }
@@ -1602,7 +1601,7 @@ void CConnman::ThreadDNSAddressSeed()
         LOCK(cs_vNodes);
         int nRelevant = 0;
         for (auto pnode : vNodes) {
-            nRelevant += pnode->fSuccessfullyConnected && ((pnode->nServices & nRelevantServices) == nRelevantServices);
+            nRelevant += pnode->fSuccessfullyConnected && HasAllDesirableServiceFlags(pnode->nServices);
         }
         if (nRelevant >= 2) {
             LogPrintf("P2P peers available. Skipped DNS seeding.\n");
@@ -1624,7 +1623,7 @@ void CConnman::ThreadDNSAddressSeed()
         } else {
             std::vector<CNetAddr> vIPs;
             std::vector<CAddress> vAdd;
-            ServiceFlags requiredServiceBits = nRelevantServices;
+            ServiceFlags requiredServiceBits = GetDesirableServiceFlags(NODE_NONE);
             std::string host = GetDNSHost(seed, &requiredServiceBits);
             CNetAddr resolveSource;
             if (!resolveSource.SetInternal(host)) {
@@ -1705,7 +1704,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
             for (const std::string& strAddr : connect)
             {
                 CAddress addr(CService(), NODE_NONE);
-                OpenNetworkConnection(addr, false, nullptr, strAddr.c_str());
+                OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(), false, false, true);
                 for (int i = 0; i < 10 && i < nLoop; i++)
                 {
                     if (!interruptNet.sleep_for(std::chrono::milliseconds(500)))
@@ -1753,17 +1752,11 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
         // Only connect out to one peer per network group (/16 for IPv4).
         // Do this here so we don't have to critsect vNodes inside mapAddresses critsect.
         int nOutbound = 0;
-        int nOutboundRelevant = 0;
         std::set<std::vector<unsigned char> > setConnected;
         {
             LOCK(cs_vNodes);
             for (CNode* pnode : vNodes) {
                 if (!pnode->fInbound && !pnode->fAddnode) {
-
-                    // Count the peers that have all relevant services
-                    if (pnode->fSuccessfullyConnected && !pnode->fFeeler && ((pnode->nServices & nRelevantServices) == nRelevantServices)) {
-                        nOutboundRelevant++;
-                    }
                     // Netgroups for inbound and addnode peers are not excluded because our goal here
                     // is to not use multiple of our limited outbound slots on a single netgroup
                     // but inbound and addnode peers do not use our outbound slots. Inbound peers
@@ -1818,21 +1811,16 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
             if (IsLimited(addr))
                 continue;
 
-            // only connect to full nodes
-            if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES)
-                continue;
-
             // only consider very recently tried nodes after 30 failed attempts
             if (nANow - addr.nLastTry < 600 && nTries < 30)
                 continue;
 
-            // only consider nodes missing relevant services after 40 failed attempts and only if less than half the outbound are up.
-            ServiceFlags nRequiredServices = nRelevantServices;
-            if (nTries >= 40 && nOutbound < (nMaxOutbound >> 1)) {
-                nRequiredServices = REQUIRED_SERVICES;
-            }
-
-            if ((addr.nServices & nRequiredServices) != nRequiredServices) {
+            // for non-feelers, require all the services we'll want,
+            // for feelers, only require they be a full node (only because most
+            // SPV clients don't have a good address DB available)
+            if (!fFeeler && !HasAllDesirableServiceFlags(addr.nServices)) {
+                continue;
+            } else if (fFeeler && !MayHaveUsefulAddressDB(addr.nServices)) {
                 continue;
             }
 
@@ -1841,13 +1829,6 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
                 continue;
 
             addrConnect = addr;
-
-            // regardless of the services assumed to be available, only require the minimum if half or more outbound have relevant services
-            if (nOutboundRelevant >= (nMaxOutbound >> 1)) {
-                addrConnect.nServices = REQUIRED_SERVICES;
-            } else {
-                addrConnect.nServices = nRequiredServices;
-            }
             break;
         }
 
@@ -2712,7 +2693,6 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn
     nSendVersion(0)
 {
     nServices = NODE_NONE;
-    nServicesExpected = NODE_NONE;
     hSocket = hSocketIn;
     nRecvVersion = INIT_PROTO_VERSION;
     nLastSend = 0;
src/net.h
@@ -84,8 +84,6 @@ static const bool DEFAULT_FORCEDNSSEED = false;
 static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000;
 static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000;
 
-static const ServiceFlags REQUIRED_SERVICES = NODE_NETWORK;
-
 // NOTE: When adjusting this, update rpcnet:setban's help ("24h")
 static const unsigned int DEFAULT_MISBEHAVING_BANTIME = 60 * 60 * 24; // Default 24-hour ban
 
@@ -130,7 +128,6 @@ public:
     struct Options
     {
        ServiceFlags nLocalServices = NODE_NONE;
-        ServiceFlags nRelevantServices = NODE_NONE;
        int nMaxConnections = 0;
        int nMaxOutbound = 0;
        int nMaxAddnode = 0;
@@ -152,7 +149,6 @@ public:
 
     void Init(const Options& connOptions) {
         nLocalServices = connOptions.nLocalServices;
-        nRelevantServices = connOptions.nRelevantServices;
         nMaxConnections = connOptions.nMaxConnections;
         nMaxOutbound = std::min(connOptions.nMaxOutbound, connOptions.nMaxConnections);
         nMaxAddnode = connOptions.nMaxAddnode;
@@ -390,9 +386,6 @@ private:
     /** Services this instance offers */
     ServiceFlags nLocalServices;
 
-    /** Services this instance cares about */
-    ServiceFlags nRelevantServices;
-
     CSemaphore *semOutbound;
     CSemaphore *semAddnode;
     int nMaxConnections;
@@ -585,7 +578,6 @@ class CNode
 public:
     // socket
     std::atomic<ServiceFlags> nServices;
-    ServiceFlags nServicesExpected;
     SOCKET hSocket;
     size_t nSendSize; // total size of all vSendMsg entries
     size_t nSendOffset; // offset inside the first vSendMsg already sent
src/net_processing.cpp
@@ -1232,11 +1232,11 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         {
             connman->SetServices(pfrom->addr, nServices);
         }
-        if (pfrom->nServicesExpected & ~nServices)
+        if (!pfrom->fInbound && !pfrom->fFeeler && !pfrom->fAddnode && !HasAllDesirableServiceFlags(nServices))
         {
-            LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->GetId(), nServices, pfrom->nServicesExpected);
+            LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->GetId(), nServices, GetDesirableServiceFlags(nServices));
             connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
-                               strprintf("Expected to offer services %08x", pfrom->nServicesExpected)));
+                               strprintf("Expected to offer services %08x", GetDesirableServiceFlags(nServices))));
             pfrom->fDisconnect = true;
             return false;
         }
@@ -1455,7 +1455,10 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
         if (interruptMsgProc)
             return true;
 
-        if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES)
+        // We only bother storing full nodes, though this may include
+        // things which we would not make an outbound connection to, in
+        // part because we may make feeler connections to them.
+        if (!MayHaveUsefulAddressDB(addr.nServices))
             continue;
 
         if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
src/protocol.h
@@ -277,6 +277,43 @@ enum ServiceFlags : uint64_t {
     // BIP process.
 };
 
+/**
+ * Gets the set of service flags which are "desirable" for a given peer.
+ *
+ * These are the flags which are required for a peer to support for them
+ * to be "interesting" to us, ie for us to wish to use one of our few
+ * outbound connection slots for or for us to wish to prioritize keeping
+ * their connection around.
+ *
+ * Relevant service flags may be peer- and state-specific in that the
+ * version of the peer may determine which flags are required (eg in the
+ * case of NODE_NETWORK_LIMITED where we seek out NODE_NETWORK peers
+ * unless they set NODE_NETWORK_LIMITED and we are out of IBD, in which
+ * case NODE_NETWORK_LIMITED suffices).
+ *
+ * Thus, generally, avoid calling with peerServices == NODE_NONE.
+ */
+static ServiceFlags GetDesirableServiceFlags(ServiceFlags services) {
+    return ServiceFlags(NODE_NETWORK | NODE_WITNESS);
+}
+
+/**
+ * A shortcut for (services & GetDesirableServiceFlags(services))
+ * == GetDesirableServiceFlags(services), ie determines whether the given
+ * set of service flags are sufficient for a peer to be "relevant".
+ */
+static inline bool HasAllDesirableServiceFlags(ServiceFlags services) {
+    return !(GetDesirableServiceFlags(services) & (~services));
+}
+
+/**
+ * Checks if a peer with the given service flags may be capable of having a
+ * robust address-storage DB. Currently an alias for checking NODE_NETWORK.
+ */
+static inline bool MayHaveUsefulAddressDB(ServiceFlags services) {
+    return services & NODE_NETWORK;
+}
+
 /** A CService with information about it as peer */
 class CAddress : public CService
 {
src/rpc/net.cpp
@@ -217,7 +217,7 @@ UniValue addnode(const JSONRPCRequest& request)
     if (strCommand == "onetry")
     {
         CAddress addr;
-        g_connman->OpenNetworkConnection(addr, false, nullptr, strNode.c_str());
+        g_connman->OpenNetworkConnection(addr, false, nullptr, strNode.c_str(), false, false, true);
         return NullUniValue;
     }
 
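The last point in the commit message (and the new docblock in protocol.h) anticipates NODE_NETWORK_LIMITED. As a purely hypothetical follow-up sketch, not part of this commit, that support could reduce to making GetDesirableServiceFlags state dependent; the NODE_NETWORK_LIMITED bit value (BIP159) and the IsInitialBlockDownload() stub below are assumptions used only for illustration.

// Hypothetical follow-up sketch (NOT in this commit): once every caller goes
// through GetDesirableServiceFlags(), NODE_NETWORK_LIMITED support is mostly a
// state-dependent return value, as the docblock added to protocol.h anticipates.
#include <cstdint>
#include <cstdio>

enum ServiceFlags : uint64_t {
    NODE_NONE            = 0,
    NODE_NETWORK         = (1 << 0),
    NODE_WITNESS         = (1 << 3),
    NODE_NETWORK_LIMITED = (1 << 10), // assumed BIP159 bit, for illustration
};

// Stub for illustration; the real node tracks this in validation state.
static bool IsInitialBlockDownload() { return false; }

static ServiceFlags GetDesirableServiceFlags(ServiceFlags services) {
    // Once out of IBD, a pruned-but-recent peer advertising NODE_NETWORK_LIMITED
    // would be good enough for an outbound slot.
    if ((services & NODE_NETWORK_LIMITED) && !IsInitialBlockDownload()) {
        return ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS);
    }
    return ServiceFlags(NODE_NETWORK | NODE_WITNESS);
}

int main() {
    ServiceFlags limited_peer = ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS);
    std::printf("desirable flags for a limited peer: %04llx\n",
                (unsigned long long)GetDesirableServiceFlags(limited_peer));
    return 0;
}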