Cleanup peer.go.

This commit does some housekeeping on peer.go to make the code more
consistent, correct a few comments, and add new comments to explain the
peer data flow.  A couple of examples are variables not using the standard
Go style (camelCase) and comments that don't match the style of other
comments.
Dave Collins 2013-12-24 10:32:20 -06:00
parent d3a7f15a87
commit 17a9b41bef
2 changed files with 62 additions and 39 deletions


@@ -528,7 +528,7 @@ func (b *blockManager) handleInvMsg(imsg *invMsg) {
         // Add the inventory to the cache of known inventory
         // for the peer.
-        imsg.peer.addKnownInventory(iv)
+        imsg.peer.AddKnownInventory(iv)

         if b.fetchingHeaders {
             // if we are fetching headers and already know

peer.go

@@ -97,12 +97,29 @@ func newNetAddress(addr net.Addr, services btcwire.ServiceFlag) (*btcwire.NetAdd
     return na, nil
 }

+// TODO(davec): Rename and comment this
 type outMsg struct {
     msg      btcwire.Message
     doneChan chan bool
 }

-// peer provides a bitcoin peer for handling bitcoin communications.
+// peer provides a bitcoin peer for handling bitcoin communications. The
+// overall data flow is split into 3 goroutines and a separate block manager.
+// Inbound messages are read via the inHandler goroutine and generally
+// dispatched to their own handler. For inbound data-related messages such as
+// blocks, transactions, and inventory, the data is passed on to the block
+// manager to handle it. Outbound messages are queued via QueueMessage or
+// QueueInventory. QueueMessage is intended for all messages, including
+// responses to data such as blocks and transactions. QueueInventory, on the
+// other hand, is only intended for relaying inventory as it employs a trickling
+// mechanism to batch the inventory together. The data flow for outbound
+// messages uses two goroutines, queueHandler and outHandler. The first,
+// queueHandler, is used as a way for external entities (mainly block manager)
+// to queue messages quickly regardless of whether the peer is currently
+// sending or not. It acts as the traffic cop between the external world and
+// the actual goroutine which writes to the network socket. In addition, the
+// peer contains several functions which are of the form pushX, that are used
+// to push messages to the peer. Internally they use QueueMessage.
 type peer struct {
     server          *server
     protocolVersion uint32
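To make the outbound half of the data flow described in the new comment concrete, here is a minimal, self-contained sketch (not the actual peer code; the names and types are simplified stand-ins): producers queue messages without blocking, a queueHandler-style goroutine acts as the traffic cop, and a single writer goroutine drains one message at a time and acks each one.

package main

import (
    "container/list"
    "fmt"
    "time"
)

// msg is a simplified stand-in for btcwire.Message.
type msg string

func main() {
    outputQueue := make(chan msg, 50) // producers queue here (QueueMessage analogue)
    sendQueue := make(chan msg, 1)    // at most one message outstanding to the writer
    sendDone := make(chan bool, 1)    // writer acks each message it finishes
    quit := make(chan bool)

    // outHandler stand-in: the only goroutine that "writes to the socket".
    go func() {
        for m := range sendQueue {
            fmt.Println("wrote:", m)
            sendDone <- true
        }
    }()

    // queueHandler stand-in: accepts messages immediately and hands them to
    // the writer one at a time, so producers never block on the socket.
    go func() {
        pending := list.New()
        waiting := false // true while a message is outstanding in sendQueue
        for {
            select {
            case m := <-outputQueue:
                if !waiting {
                    sendQueue <- m
                    waiting = true
                } else {
                    pending.PushBack(m)
                }
            case <-sendDone:
                next := pending.Front()
                if next == nil {
                    waiting = false
                    continue
                }
                sendQueue <- pending.Remove(next).(msg)
            case <-quit:
                return
            }
        }
    }()

    // Producers (e.g. the block manager) just queue and move on.
    for i := 0; i < 3; i++ {
        outputQueue <- msg(fmt.Sprintf("inv batch %d", i))
    }
    time.Sleep(100 * time.Millisecond)
    close(quit)
}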
@@ -123,12 +140,12 @@ type peer struct {
     knownAddresses     map[string]bool
     knownInventory     *MruInventoryMap
     knownInvMutex      sync.Mutex
-    requestedTxns      map[btcwire.ShaHash]bool // owned by blockmanager.
-    requestedBlocks    map[btcwire.ShaHash]bool // owned by blockmanager.
+    requestedTxns      map[btcwire.ShaHash]bool // owned by blockmanager
+    requestedBlocks    map[btcwire.ShaHash]bool // owned by blockmanager
     lastBlock          int32
-    retrycount         int64
-    prevGetBlocksBegin *btcwire.ShaHash // owned by blockmanager.
-    prevGetBlocksStop  *btcwire.ShaHash // owned by blockmaanger.
+    retryCount         int64
+    prevGetBlocksBegin *btcwire.ShaHash // owned by blockmanager
+    prevGetBlocksStop  *btcwire.ShaHash // owned by blockmanager
     requestQueue       *list.List
     continueHash       *btcwire.ShaHash
     outputQueue        chan outMsg
@@ -160,9 +177,9 @@ func (p *peer) isKnownInventory(invVect *btcwire.InvVect) bool {
     return false
 }

-// addKnownInventory adds the passed inventory to the cache of known inventory
+// AddKnownInventory adds the passed inventory to the cache of known inventory
 // for the peer. It is safe for concurrent access.
-func (p *peer) addKnownInventory(invVect *btcwire.InvVect) {
+func (p *peer) AddKnownInventory(invVect *btcwire.InvVect) {
     p.knownInvMutex.Lock()
     defer p.knownInvMutex.Unlock()
@@ -506,7 +523,7 @@ func (p *peer) handleTxMsg(msg *btcwire.MsgTx) {
     // methods and things such as hash caching.
     tx := btcutil.NewTx(msg)
     iv := btcwire.NewInvVect(btcwire.InvTypeTx, tx.Sha())
-    p.addKnownInventory(iv)
+    p.AddKnownInventory(iv)

     // Queue the transaction up to be handled by the block manager and
     // intentionally block further receives until the transaction is fully
@@ -531,7 +548,7 @@ func (p *peer) handleBlockMsg(msg *btcwire.MsgBlock, buf []byte) {
         return
     }
     iv := btcwire.NewInvVect(btcwire.InvTypeBlock, hash)
-    p.addKnownInventory(iv)
+    p.AddKnownInventory(iv)

     // Queue the block up to be handled by the block
     // manager and intentionally block further receives
@@ -1122,12 +1139,12 @@ out:
     idleTimer.Stop()

-    // Ensure connection is closed and notify server and block manager that
-    // the peer is done.
+    // Ensure connection is closed and notify the server that the peer is
+    // done.
     p.Disconnect()
     p.server.donePeers <- p

-    // Only tell blockmanager we are gone if we ever told it we existed.
+    // Only tell block manager we are gone if we ever told it we existed.
     if p.versionKnown {
        p.server.blockManager.DonePeer(p)
     }
@@ -1145,9 +1162,9 @@ func (p *peer) queueHandler() {
     trickleTicker := time.NewTicker(time.Second * 10)

     // We keep the waiting flag so that we know if we have a message queued
-    // to the outHandler or not. We could use the presence of a head
-    // of the list for this but then we have rather racy concerns about
-    // whether it has gotten it at cleanup time - and thus who sends on the
+    // to the outHandler or not. We could use the presence of a head of
+    // the list for this but then we have rather racy concerns about whether
+    // it has gotten it at cleanup time - and thus who sends on the
     // message's done channel. To avoid such confusion we keep a different
     // flag and pendingMsgs only contains messages that we have not yet
     // passed to outHandler.
@@ -1171,19 +1188,25 @@ out:
         case msg := <-p.outputQueue:
             waiting = queuePacket(msg, pendingMsgs, waiting)

+        // This channel is notified when a message has been sent across
+        // the network socket.
         case <-p.sendDoneQueue:
             peerLog.Tracef("%s: acked by outhandler", p)

-            // one message on sendChan means one message on
-            // txDoneChan when it has been sent.
+            // No longer waiting if there are no more messages
+            // in the pending messages queue.
             next := pendingMsgs.Front()
-            if next != nil {
+            if next == nil {
+                waiting = false
+                continue
+            }
+
+            // Notify the outHandler about the next item to
+            // asynchronously send.
             val := pendingMsgs.Remove(next)
             peerLog.Tracef("%s: sending to outHandler", p)
             p.sendQueue <- val.(outMsg)
             peerLog.Tracef("%s: sent to outHandler", p)
-            } else {
-                waiting = false
-            }

         case iv := <-p.outputInvChan:
             // No handshake? They'll find out soon enough.
@@ -1222,7 +1245,7 @@ out:
                 // Add the inventory that is being relayed to
                 // the known inventory for the peer.
-                p.addKnownInventory(iv)
+                p.AddKnownInventory(iv)
             }
             if len(invMsg.InvList) > 0 {
                 waiting = queuePacket(outMsg{msg: invMsg},
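The trickling mentioned in the peer comment batches relayed inventory rather than sending each item as it arrives. A rough sketch of that idea, assuming simplified stand-in types and a shortened tick interval, could look like this:

package main

import (
    "fmt"
    "time"
)

// invVect stands in for btcwire.InvVect in this sketch.
type invVect string

// trickleInv batches queued inventory and flushes it as a single inv message
// on each ticker tick. The real peer trickles every 10 seconds; a shorter
// interval is used here so the demo finishes quickly.
func trickleInv(invChan <-chan invVect, quit <-chan struct{}) {
    trickleTicker := time.NewTicker(200 * time.Millisecond)
    defer trickleTicker.Stop()

    known := make(map[invVect]bool) // stand-in for the MRU known-inventory map
    var pending []invVect

    for {
        select {
        case iv := <-invChan:
            pending = append(pending, iv)

        case <-trickleTicker.C:
            if len(pending) == 0 {
                continue
            }
            // Build one inv message from everything gathered since the
            // last tick, skipping anything the peer already knows and
            // recording the rest as known (mirroring AddKnownInventory).
            var batch []invVect
            for _, iv := range pending {
                if known[iv] {
                    continue
                }
                known[iv] = true
                batch = append(batch, iv)
            }
            pending = nil
            if len(batch) > 0 {
                fmt.Printf("relaying inv message with %d entries\n", len(batch))
            }

        case <-quit:
            return
        }
    }
}

func main() {
    invChan := make(chan invVect, 50)
    quit := make(chan struct{})
    go trickleInv(invChan, quit)

    invChan <- invVect("tx deadbeef")
    invChan <- invVect("block cafef00d")
    time.Sleep(500 * time.Millisecond)
    close(quit)
}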
@@ -1462,8 +1485,8 @@ func newPeerBase(s *server, inbound bool) *peer {
         requestedBlocks: make(map[btcwire.ShaHash]bool),
         requestQueue:    list.New(),
         outputQueue:     make(chan outMsg, outputBufferSize),
-        sendQueue:       make(chan outMsg, 1), // nonblocking sync.
-        sendDoneQueue:   make(chan bool, 1),   // nonblocking sync.
+        sendQueue:       make(chan outMsg, 1), // nonblocking sync
+        sendDoneQueue:   make(chan bool, 1),   // nonblocking sync
         outputInvChan:   make(chan *btcwire.InvVect, outputBufferSize),
         txProcessed:     make(chan bool, 1),
         blockProcessed:  make(chan bool, 1),
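The "nonblocking sync" channels above are buffered with a capacity of one so that, as long as the protocol guarantees only one message (or one ack) is ever outstanding at a time, the sender never blocks. A tiny sketch of the pattern, with hypothetical names:

package main

import "fmt"

func main() {
    // Capacity-one buffered channels: the single in-flight item or ack
    // always fits in the buffer, so sends never block the sender.
    sendQueue := make(chan string, 1)
    sendDone := make(chan bool, 1)

    sendQueue <- "ping" // does not block: buffer holds the one in-flight item

    go func() {
        m := <-sendQueue
        fmt.Println("handled:", m)
        sendDone <- true // does not block either: at most one ack is pending
    }()

    <-sendDone // wait for the ack
}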
@@ -1543,14 +1566,14 @@ func newOutboundPeer(s *server, addr string, persistent bool) *peer {
             srvrLog.Debugf("Attempting to connect to %s", faddr)
             conn, err := dial("tcp", addr)
             if err != nil {
-                p.retrycount += 1
+                p.retryCount += 1
                 srvrLog.Debugf("Failed to connect to %s: %v",
                     faddr, err)
                 if !persistent {
                     p.server.donePeers <- p
                     return
                 }
-                scaledInterval := connectionRetryInterval.Nanoseconds() * p.retrycount / 2
+                scaledInterval := connectionRetryInterval.Nanoseconds() * p.retryCount / 2
                 scaledDuration := time.Duration(scaledInterval)
                 srvrLog.Debugf("Retrying connection to %s in "+
                     "%s", faddr, scaledDuration)
@@ -1570,7 +1593,7 @@ func newOutboundPeer(s *server, addr string, persistent bool) *peer {
                 conn.RemoteAddr())
             p.conn = conn
             atomic.AddInt32(&p.connected, 1)
-            p.retrycount = 0
+            p.retryCount = 0
             p.Start()
         }