lbcd/wire/msgnotfound.go

// Copyright (c) 2013-2015 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package wire

import (
	"fmt"
	"io"
)

// MsgNotFound defines a bitcoin notfound message which is sent in response to
// a getdata message if any of the requested data is not available on the peer.
// Each message is limited to a maximum number of inventory vectors, which is
// currently 50,000.
//
// Use the AddInvVect function to build up the list of inventory vectors when
// sending a notfound message to another peer.
type MsgNotFound struct {
	InvList []*InvVect
}
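
// A minimal usage sketch from a caller's perspective.  The hash value, the
// InvTypeTx inventory type, and the lbcd import paths below are illustrative
// assumptions rather than anything required by this file.
//
//	import (
//		"bytes"
//
//		"github.com/lbryio/lbcd/chaincfg/chainhash"
//		"github.com/lbryio/lbcd/wire"
//	)
//
//	// hash identifies the item the peer could not locate and is assumed to
//	// have been obtained elsewhere (for example, from the getdata request).
//	var hash *chainhash.Hash
//
//	notFound := wire.NewMsgNotFound()
//	if err := notFound.AddInvVect(wire.NewInvVect(wire.InvTypeTx, hash)); err != nil {
//		// More than MaxInvPerMsg inventory vectors were added.
//	}
//
//	var buf bytes.Buffer
//	if err := notFound.BtcEncode(&buf, wire.ProtocolVersion); err != nil {
//		// The message could not be serialized.
//	}
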
// AddInvVect adds an inventory vector to the message.
func (msg *MsgNotFound) AddInvVect(iv *InvVect) error {
	if len(msg.InvList)+1 > MaxInvPerMsg {
		str := fmt.Sprintf("too many invvect in message [max %v]",
			MaxInvPerMsg)
		return messageError("MsgNotFound.AddInvVect", str)
	}

	msg.InvList = append(msg.InvList, iv)
	return nil
}

// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// This is part of the Message interface implementation.
func (msg *MsgNotFound) BtcDecode(r io.Reader, pver uint32) error {
	count, err := ReadVarInt(r, pver)
	if err != nil {
		return err
	}

	// Limit to max inventory vectors per message.
	if count > MaxInvPerMsg {
		str := fmt.Sprintf("too many invvect in message [%v]", count)
		return messageError("MsgNotFound.BtcDecode", str)
	}

	// Create a contiguous slice of inventory vectors to deserialize into in
	// order to reduce the number of allocations.
	invList := make([]InvVect, count)
	msg.InvList = make([]*InvVect, 0, count)
	for i := uint64(0); i < count; i++ {
		iv := &invList[i]
		err := readInvVect(r, pver, iv)
		if err != nil {
			return err
		}
		msg.AddInvVect(iv)
	}

	return nil
}

// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
func (msg *MsgNotFound) BtcEncode(w io.Writer, pver uint32) error {
	// Limit to max inventory vectors per message.
	count := len(msg.InvList)
	if count > MaxInvPerMsg {
		str := fmt.Sprintf("too many invvect in message [%v]", count)
		return messageError("MsgNotFound.BtcEncode", str)
	}

	err := WriteVarInt(w, pver, uint64(count))
	if err != nil {
		return err
	}

	for _, iv := range msg.InvList {
		err := writeInvVect(w, pver, iv)
		if err != nil {
			return err
		}
	}

	return nil
}

// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgNotFound) Command() string {
	return CmdNotFound
}

// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgNotFound) MaxPayloadLength(pver uint32) uint32 {
	// Max var int 9 bytes + max InvVects at 36 bytes each.
	// Num inventory vectors (varInt) + max allowed inventory vectors.
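	// At the current limits that is 9 + (50,000 * 36) = 1,800,009 bytes.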
	return MaxVarIntPayload + (MaxInvPerMsg * maxInvVectPayload)
}

// NewMsgNotFound returns a new bitcoin notfound message that conforms to the
// Message interface. See MsgNotFound for details.
func NewMsgNotFound() *MsgNotFound {
	return &MsgNotFound{
		InvList: make([]*InvVect, 0, defaultInvListAlloc),
	}
}