lbcd/wire/msggetdata.go

// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package wire

import (
"fmt"
"io"
)

// MsgGetData implements the Message interface and represents a bitcoin
// getdata message. It is used to request data such as blocks and transactions
// from another peer. It should be used in response to the inv (MsgInv) message
// to request the actual data referenced by each inventory vector the receiving
// peer doesn't already have. Each message is limited to a maximum number of
// inventory vectors, which is currently 50,000. As a result, multiple messages
// must be used to request larger amounts of data.
//
// Use the AddInvVect function to build up the list of inventory vectors when
// sending a getdata message to another peer.
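//
// A minimal usage sketch (blockHash is a hypothetical chainhash.Hash naming a
// block the remote peer advertised via inv):
//
//	getData := NewMsgGetData()
//	iv := NewInvVect(InvTypeBlock, &blockHash)
//	if err := getData.AddInvVect(iv); err != nil {
//		// Handle the error, e.g. exceeding the 50,000 vector limit.
//	}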
type MsgGetData struct {
InvList []*InvVect
}

// AddInvVect adds an inventory vector to the message.
func (msg *MsgGetData) AddInvVect(iv *InvVect) error {
if len(msg.InvList)+1 > MaxInvPerMsg {
str := fmt.Sprintf("too many invvect in message [max %v]",
MaxInvPerMsg)
return messageError("MsgGetData.AddInvVect", str)
}

msg.InvList = append(msg.InvList, iv)
return nil
}

// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// This is part of the Message interface implementation.
func (msg *MsgGetData) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error {
count, err := ReadVarInt(r, pver)
if err != nil {
return err
}

// Limit to max inventory vectors per message.
if count > MaxInvPerMsg {
str := fmt.Sprintf("too many invvect in message [%v]", count)
return messageError("MsgGetData.BtcDecode", str)
}

// Create a contiguous slice of inventory vectors to deserialize into in
// order to reduce the number of allocations.
invList := make([]InvVect, count)
msg.InvList = make([]*InvVect, 0, count)
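// Each pointer appended in the loop below refers into the contiguous backing
// slice above rather than to a separately allocated InvVect, which is what
// keeps the per-vector allocations down.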
for i := uint64(0); i < count; i++ {
iv := &invList[i]
err := readInvVect(r, pver, iv)
if err != nil {
return err
}
msg.AddInvVect(iv)
}

return nil
}

// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
func (msg *MsgGetData) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error {
// Limit to max inventory vectors per message.
count := len(msg.InvList)
if count > MaxInvPerMsg {
str := fmt.Sprintf("too many invvect in message [%v]", count)
return messageError("MsgGetData.BtcEncode", str)
}

err := WriteVarInt(w, pver, uint64(count))
if err != nil {
return err
}

for _, iv := range msg.InvList {
err := writeInvVect(w, pver, iv)
if err != nil {
return err
}
}

return nil
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgGetData) Command() string {
return CmdGetData
}

// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgGetData) MaxPayloadLength(pver uint32) uint32 {
// Num inventory vectors (varInt) + max allowed inventory vectors.
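// Assuming the usual wire constants (a 9-byte maximum varint and 36-byte
// inventory vectors) together with the 50,000 vector cap noted above, this
// works out to 9 + 50,000*36 = 1,800,009 bytes.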
return MaxVarIntPayload + (MaxInvPerMsg * maxInvVectPayload)
}

// NewMsgGetData returns a new bitcoin getdata message that conforms to the
// Message interface. See MsgGetData for details.
func NewMsgGetData() *MsgGetData {
return &MsgGetData{
InvList: make([]*InvVect, 0, defaultInvListAlloc),
}
}

// NewMsgGetDataSizeHint returns a new bitcoin getdata message that conforms to
// the Message interface. See MsgGetData for details. This function differs
// from NewMsgGetData in that it allows a default allocation size for the
// backing array which houses the inventory vector list. This allows callers
// who know in advance how large the inventory list will grow to avoid the
// overhead of growing the internal backing array several times when appending
// large amounts of inventory vectors with AddInvVect. Note that the specified
// hint is just that - a hint that is used for the default allocation size.
// Adding more (or fewer) inventory vectors will still work properly. The size
// hint is limited to MaxInvPerMsg.
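//
// A minimal sketch of the intended use, where invVects is a hypothetical
// []*InvVect gathered from a peer's inv message:
//
//	getData := NewMsgGetDataSizeHint(uint(len(invVects)))
//	for _, iv := range invVects {
//		if err := getData.AddInvVect(iv); err != nil {
//			break // more than MaxInvPerMsg vectors were supplied
//		}
//	}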
func NewMsgGetDataSizeHint(sizeHint uint) *MsgGetData {
// Limit the specified hint to the maximum allowed per message.
if sizeHint > MaxInvPerMsg {
sizeHint = MaxInvPerMsg
}
return &MsgGetData{
InvList: make([]*InvVect, 0, sizeHint),
}
}