keep track of connected groups as we gain and lose outbound peers.

Instead of recalculating them every time we go looking for more peers.
This should save a few cycles.
Owain G. Ainsworth 2013-10-31 19:53:29 +00:00
parent 6949a4f940
commit c1a1e6b6b2
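
In short (a sketch assembled from the names in the diff below, not a verbatim excerpt): rather than rebuilding a per-group tally on every pass of the connection loop, peerState now carries a single map that is incremented when an outbound peer is added and decremented when one is removed.

	// Before: recount all outbound peers on every pass.
	groups := make(map[string]int)
	forAllOutboundPeers(state, func(p *peer) {
		groups[GroupKey(p.na)]++
	})

	// After: maintain one map for the life of the peerState.
	state.outboundGroups[GroupKey(p.na)]++ // outbound peer added
	state.outboundGroups[GroupKey(p.na)]-- // outbound peer removed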

@@ -74,6 +74,7 @@ type peerState struct {
 	outboundPeers    *list.List
 	persistentPeers  *list.List
 	banned           map[string]time.Time
+	outboundGroups   map[string]int
 	maxOutboundPeers int
 }
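
For context, GroupKey comes from btcd's address manager and reduces a peer's address to its network group, so outboundGroups counts outbound connections per network neighbourhood rather than per address. A simplified stand-in, assuming the IPv4 behaviour is roughly a /16 bucket (the real function also special-cases IPv6, Tor, and various reserved ranges):

	import (
		"fmt"
		"net"
	)

	// groupKeySketch buckets plain IPv4 addresses by their /16
	// prefix. Illustration only, not the addrmanager implementation.
	func groupKeySketch(ip net.IP) string {
		if ip4 := ip.To4(); ip4 != nil {
			return fmt.Sprintf("%d.%d", ip4[0], ip4[1])
		}
		return ip.String() // oversimplified fallback
	}
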
@@ -138,12 +139,13 @@ func (s *server) handleAddPeerMsg(state *peerState, p *peer) bool {
 	// Add the new peer and start it.
 	log.Debugf("SRVR: New peer %s", p)
-	if p.persistent {
-		state.persistentPeers.PushBack(p)
-	} else {
-		if p.inbound {
-			state.peers.PushBack(p)
-			p.Start()
-		} else {
-			state.outboundPeers.PushBack(p)
-		}
+	if p.inbound {
+		state.peers.PushBack(p)
+		p.Start()
+	} else {
+		state.outboundGroups[GroupKey(p.na)]++
+		if p.persistent {
+			state.persistentPeers.PushBack(p)
+		} else {
+			state.outboundPeers.PushBack(p)
+		}
 	}
@@ -172,6 +174,9 @@ func (s *server) handleDonePeerMsg(state *peerState, p *peer) {
 				e.Value = newOutboundPeer(s, p.addr, true)
 				return
 			}
+			if !p.inbound {
+				state.outboundGroups[GroupKey(p.na)]--
+			}
 			list.Remove(e)
 			log.Debugf("SRVR: Removed peer %s", p)
 			return
@@ -353,6 +358,9 @@ func (s *server) handleQuery(querymsg interface{}, state *peerState) {
 		for e := state.persistentPeers.Front(); e != nil; e = e.Next() {
 			peer := e.Value.(*peer)
 			if peer.addr == msg.addr {
+				// Keep group counts ok since we remove from
+				// the list now.
+				state.outboundGroups[GroupKey(peer.na)]--
 				// This is ok because we are not continuing
 				// to iterate so won't corrupt the loop.
 				state.persistentPeers.Remove(e)
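
The maintained map is only as good as its bookkeeping: the increment in handleAddPeerMsg must be matched by a decrement on every removal path, which is why both handleDonePeerMsg and this persistent-peer removal adjust it. A hypothetical debug helper (checkGroups does not exist in the codebase) that recomputes the counts the old way and compares; entries that have decayed back to zero are allowed to linger in the map:

	// checkGroups rebuilds group counts from the live peer lists
	// and compares them with the maintained map. Debug aid only.
	func checkGroups(state *peerState) bool {
		fresh := make(map[string]int)
		forAllOutboundPeers(state, func(p *peer) {
			fresh[GroupKey(p.na)]++
		})
		for key, want := range fresh {
			if state.outboundGroups[key] != want {
				return false
			}
		}
		for key, have := range state.outboundGroups {
			if have != 0 && fresh[key] != have {
				return false
			}
		}
		return true
	}
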
@@ -450,6 +458,7 @@ func (s *server) peerHandler() {
 		outboundPeers:    list.New(),
 		banned:           make(map[string]time.Time),
 		maxOutboundPeers: defaultMaxOutbound,
+		outboundGroups:   make(map[string]int),
 	}
 	if cfg.MaxPeers < state.maxOutboundPeers {
 		state.maxOutboundPeers = cfg.MaxPeers
@@ -515,11 +524,6 @@ out:
 			atomic.LoadInt32(&s.shutdown) != 0 {
 			continue
 		}
-		groups := make(map[string]int)
-		forAllOutboundPeers(state, func(p *peer) {
-			groups[GroupKey(p.na)]++
-		})
-
 		tries := 0
 		for state.NeedMoreOutbound() &&
 			atomic.LoadInt32(&s.shutdown) == 0 {
@@ -543,7 +547,7 @@ out:
 			// to the same network segment at the expense of
 			// others. bitcoind breaks out of the loop here, but
 			// we continue to try other addresses.
-			if groups[key] != 0 {
+			if state.outboundGroups[key] != 0 {
 				continue
 			}
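
A tiny self-contained trace of the counting pattern above (the group key string is invented for illustration). Note that a count that drops back to zero makes the group eligible again; entries are never deleted, just left at zero:

	package main

	import "fmt"

	func main() {
		outboundGroups := make(map[string]int)

		// An outbound peer from group "10.1" connects.
		outboundGroups["10.1"]++
		fmt.Println(outboundGroups["10.1"] != 0) // true: skip further candidates here

		// That peer disconnects.
		outboundGroups["10.1"]--
		fmt.Println(outboundGroups["10.1"] != 0) // false: group is usable again
	}
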
@@ -576,7 +580,6 @@ out:
 			// already checked that we have room for more peers.
 			if s.handleAddPeerMsg(state,
 				newOutboundPeer(s, addrStr, false)) {
-				groups[key]++
 			}
 		}
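
With the increment now handled inside handleAddPeerMsg (second hunk above), the manual groups[key]++ after a successful add is redundant, which is why this final hunk removes it.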