introduce a backoff behavior for peers that are not permitting connections

with help from davec:
 o implement peer { retrycount int64 ..
 o count connect failures per peer
 o calculate backoff as 10s * retrycount / 2
This commit is contained in:
parent a69647b94d
commit 568c0044a0

1 changed file with 7 additions and 2 deletions
peer.go | 9 +++++++--
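The backoff described in the commit message is linear rather than exponential: each consecutive failure adds half of the 10-second base interval, i.e. 5s, 10s, 15s, and so on. The standalone sketch below mirrors that calculation; the backoffFor helper is illustrative only and does not exist in peer.go, where the computation is done inline (see the second and third hunks below).

package main

import (
	"fmt"
	"time"
)

// backoffFor mirrors the calculation this commit introduces:
// 10s * retrycount / 2, i.e. an extra 5 seconds per consecutive failure.
// The helper name is illustrative; peer.go computes this inline.
func backoffFor(retrycount int64) time.Duration {
	baseRetryInterval := time.Second * 10
	return time.Duration(baseRetryInterval.Nanoseconds() * retrycount / 2)
}

func main() {
	for n := int64(1); n <= 4; n++ {
		fmt.Printf("failure %d -> retry in %v\n", n, backoffFor(n))
	}
	// Output:
	// failure 1 -> retry in 5s
	// failure 2 -> retry in 10s
	// failure 3 -> retry in 15s
	// failure 4 -> retry in 20s
}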
@@ -108,6 +108,7 @@ type peer struct {
 	knownInventory     *MruInventoryMap
 	knownInvMutex      sync.Mutex
 	lastBlock          int32
+	retrycount         int64
 	prevGetBlocksBegin *btcwire.ShaHash
 	prevGetBlocksStop  *btcwire.ShaHash
 	prevGetBlockMutex  sync.Mutex
@@ -1208,6 +1209,8 @@ func newOutboundPeer(s *server, addr string, persistent bool) *peer {
 		log.Debugf("[SRVR] Attempting to connect to %s", faddr)
 		conn, err := dial("tcp", addr)
 		if err != nil {
+			baseRetryInterval := time.Second * 10
+			p.retrycount += 1
 			log.Errorf("[SRVR] Failed to connect to %s: %v",
 				faddr, err)
 			if !persistent {
@@ -1215,9 +1218,11 @@ func newOutboundPeer(s *server, addr string, persistent bool) *peer {
 				p.wg.Done()
 				return
 			}
+			scaledRetryInterval := baseRetryInterval.Nanoseconds() * p.retrycount / 2
+			scaledRetrySeconds := time.Duration(scaledRetryInterval)
 			log.Infof("[SRVR] Retrying connection to %s "+
-				"in %s", faddr, connectionRetryInterval)
-			time.Sleep(connectionRetryInterval)
+				"in %v", faddr, scaledRetrySeconds)
+			time.Sleep(scaledRetrySeconds)
 			continue
 		}
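Taken together, the hunks above make the outbound connect loop behave roughly like the sketch below. It is a simplified, self-contained approximation, not the actual peer.go code: the dial target, logging, and the handling of non-persistent peers are placeholders.

package main

import (
	"log"
	"net"
	"time"
)

// connectWithBackoff approximates the retry loop in newOutboundPeer after
// this change: each failed dial bumps a per-peer counter and lengthens the
// sleep before the next attempt by 5 seconds.
func connectWithBackoff(addr string, persistent bool) net.Conn {
	var retrycount int64
	for {
		log.Printf("attempting to connect to %s", addr)
		conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
		if err == nil {
			return conn
		}

		baseRetryInterval := time.Second * 10
		retrycount += 1
		log.Printf("failed to connect to %s: %v", addr, err)
		if !persistent {
			// Non-persistent peers are not retried.
			return nil
		}

		scaledRetryInterval := baseRetryInterval.Nanoseconds() * retrycount / 2
		scaledRetrySeconds := time.Duration(scaledRetryInterval)
		log.Printf("retrying connection to %s in %v", addr, scaledRetrySeconds)
		time.Sleep(scaledRetrySeconds)
	}
}

func main() {
	// Placeholder address; a persistent peer keeps retrying with a
	// linearly growing delay until the dial succeeds.
	if conn := connectWithBackoff("127.0.0.1:8333", true); conn != nil {
		conn.Close()
	}
}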