lbcd/wire/msgtx.go

// Copyright (c) 2013-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package wire

import (
"bytes"
"fmt"
"io"
"strconv"
"github.com/btcsuite/btcd/chaincfg/chainhash"
)
const (
// TxVersion is the latest supported transaction version.
TxVersion = 1
// MaxTxInSequenceNum is the maximum sequence number the sequence field
// of a transaction input can be.
MaxTxInSequenceNum uint32 = 0xffffffff
// MaxPrevOutIndex is the maximum index the index field of a previous
// outpoint can be.
MaxPrevOutIndex uint32 = 0xffffffff
// SequenceLockTimeDisabled is a flag that if set on a transaction
// input's sequence number, the sequence number will not be interpreted
// as a relative locktime.
SequenceLockTimeDisabled = 1 << 31
// SequenceLockTimeIsSeconds is a flag that if set on a transaction
// input's sequence number, the relative locktime has units of 512
// seconds.
SequenceLockTimeIsSeconds = 1 << 22
// SequenceLockTimeMask is a mask that extracts the relative locktime
// when masked against the transaction input sequence number.
SequenceLockTimeMask = 0x0000ffff
// SequenceLockTimeGranularity is the defined time based granularity
// for seconds-based relative time locks. When converting from seconds
// to a sequence number, the value is right shifted by this amount,
// therefore the granularity of relative time locks is 512 or 2^9
// seconds. Enforced relative lock times are multiples of 512 seconds.
SequenceLockTimeGranularity = 9
// defaultTxInOutAlloc is the default size used for the backing array for
// transaction inputs and outputs. The array will dynamically grow as needed,
// but this figure is intended to provide enough space for the number of
// inputs and outputs in a typical transaction without needing to grow the
// backing array multiple times.
defaultTxInOutAlloc = 15
// minTxInPayload is the minimum payload size for a transaction input.
// PreviousOutPoint.Hash + PreviousOutPoint.Index 4 bytes + Varint for
// SignatureScript length 1 byte + Sequence 4 bytes.
minTxInPayload = 9 + chainhash.HashSize
// maxTxInPerMessage is the maximum number of transaction inputs that
// a transaction which fits into a message could possibly have.
maxTxInPerMessage = (MaxMessagePayload / minTxInPayload) + 1
// minTxOutPayload is the minimum payload size for a transaction output.
// Value 8 bytes + Varint for PkScript length 1 byte.
minTxOutPayload = 9
// maxTxOutPerMessage is the maximum number of transaction outputs that
// a transaction which fits into a message could possibly have.
maxTxOutPerMessage = (MaxMessagePayload / minTxOutPayload) + 1
// minTxPayload is the minimum payload size for a transaction. Note
// that any realistically usable transaction must have at least one
// input or output, but that is a rule enforced at a higher layer, so
// it is intentionally not included here.
// Version 4 bytes + Varint number of transaction inputs 1 byte + Varint
// number of transaction outputs 1 byte + LockTime 4 bytes.
minTxPayload = 10
// freeListMaxScriptSize is the size of each buffer in the free list
// that is used for deserializing scripts from the wire before they are
// concatenated into a single contiguous buffer. This value was chosen
// because it is slightly more than twice the size of the vast majority
// of all "standard" scripts. Larger scripts are still deserialized
// properly as the free list will simply be bypassed for them.
freeListMaxScriptSize = 512
// freeListMaxItems is the number of buffers to keep in the free list
// to use for script deserialization. This value allows up to 100
// scripts per transaction being simultaneously deserialized by 125
// peers. Thus, the peak usage of the free list is 12,500 * 512 =
// 6,400,000 bytes.
freeListMaxItems = 12500
)
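// relativeLockSeconds is an illustrative sketch, not part of the original
// file: it shows how the sequence lock constants above compose. A
// seconds-based relative lock time is reduced to 512-second units by right
// shifting by SequenceLockTimeGranularity, masked into the low 16 bits, and
// tagged with the SequenceLockTimeIsSeconds flag.
func relativeLockSeconds(seconds uint32) uint32 {
	return SequenceLockTimeIsSeconds |
		((seconds >> SequenceLockTimeGranularity) & SequenceLockTimeMask)
}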
// scriptFreeList defines a free list of byte slices (up to the maximum number
// defined by the freeListMaxItems constant) that have a cap according to the
// freeListMaxScriptSize constant. It is used to provide temporary buffers for
// deserializing scripts in order to greatly reduce the number of allocations
// required.
//
// The caller can obtain a buffer from the free list by calling the Borrow
// function and should return it via the Return function when done using it.
type scriptFreeList chan []byte
// Borrow returns a byte slice from the free list with a length according to
// the provided size. A new buffer is allocated if there are no items
// available for reuse.
//
// When the size is larger than the max size allowed for items on the free
// list, a new buffer of the appropriate size is allocated and returned. It
// is safe to attempt to return said buffer via the Return function as it
// will be ignored and allowed to go to the garbage collector.
func (c scriptFreeList) Borrow(size uint64) []byte {
if size > freeListMaxScriptSize {
return make([]byte, size)
}
var buf []byte
select {
case buf = <-c:
default:
buf = make([]byte, freeListMaxScriptSize)
}
return buf[:size]
}
// Return puts the provided byte slice back on the free list when it has a cap
// of the expected length. The buffer is expected to have been obtained via
// the Borrow function. Any slices that are not of the appropriate size, such
// as those whose size is greater than the largest allowed free list item
// size, are simply ignored so they can go to the garbage collector.
func (c scriptFreeList) Return(buf []byte) {
// Ignore any buffers returned that aren't the expected size for the
// free list.
if cap(buf) != freeListMaxScriptSize {
return
}
// Return the buffer to the free list when it's not full. Otherwise let
// it be garbage collected.
select {
case c <- buf:
default:
// Let it go to the garbage collector.
}
}
// Create the concurrent safe free list to use for script deserialization. As
// previously described, this free list is maintained to significantly reduce
// the number of allocations.
var scriptPool scriptFreeList = make(chan []byte, freeListMaxItems)
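// borrowAndCopyScript is an illustrative sketch, not part of the original
// file: it shows the borrow/fill/copy/return pattern the deserialization
// code below relies on. A temporary buffer is borrowed from scriptPool,
// filled from the reader, copied into a caller-owned slice, and returned to
// the pool for reuse.
func borrowAndCopyScript(r io.Reader, size uint64) ([]byte, error) {
	buf := scriptPool.Borrow(size)
	if _, err := io.ReadFull(r, buf); err != nil {
		scriptPool.Return(buf)
		return nil, err
	}
	script := make([]byte, len(buf))
	copy(script, buf)
	scriptPool.Return(buf)
	return script, nil
}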
// OutPoint defines a bitcoin data type that is used to track previous
// transaction outputs.
type OutPoint struct {
Hash chainhash.Hash
Index uint32
}
// NewOutPoint returns a new bitcoin transaction outpoint with the
// provided hash and index.
func NewOutPoint(hash *chainhash.Hash, index uint32) *OutPoint {
return &OutPoint{
Hash: *hash,
Index: index,
}
}
// String returns the OutPoint in the human-readable form "hash:index".
func (o OutPoint) String() string {
// Allocate enough for hash string, colon, and 10 digits. Although
// at the time of writing, the number of digits can be no greater than
// the length of the decimal representation of maxTxOutPerMessage, the
// maximum message payload may increase in the future and this
// optimization may go unnoticed, so allocate space for 10 decimal
// digits, which will fit any uint32.
buf := make([]byte, 2*chainhash.HashSize+1, 2*chainhash.HashSize+1+10)
copy(buf, o.Hash.String())
buf[2*chainhash.HashSize] = ':'
buf = strconv.AppendUint(buf, uint64(o.Index), 10)
return string(buf)
}
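// spentOutPoints is an illustrative sketch, not part of the original file:
// because OutPoint is a comparable value type, it can be used directly as a
// map key, for example to track which previous outputs a batch of
// transactions spends.
func spentOutPoints(txns []*MsgTx) map[OutPoint]struct{} {
	spent := make(map[OutPoint]struct{})
	for _, tx := range txns {
		for _, txIn := range tx.TxIn {
			spent[txIn.PreviousOutPoint] = struct{}{}
		}
	}
	return spent
}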
// TxIn defines a bitcoin transaction input.
type TxIn struct {
PreviousOutPoint OutPoint
SignatureScript []byte
Sequence uint32
}
// SerializeSize returns the number of bytes it would take to serialize the
// transaction input.
func (t *TxIn) SerializeSize() int {
// Outpoint Hash 32 bytes + Outpoint Index 4 bytes + Sequence 4 bytes +
// serialized varint size for the length of SignatureScript +
// SignatureScript bytes.
return 40 + VarIntSerializeSize(uint64(len(t.SignatureScript))) +
len(t.SignatureScript)
}
// NewTxIn returns a new bitcoin transaction input with the provided
// previous outpoint and signature script with a default sequence of
// MaxTxInSequenceNum.
func NewTxIn(prevOut *OutPoint, signatureScript []byte) *TxIn {
return &TxIn{
PreviousOutPoint: *prevOut,
SignatureScript: signatureScript,
Sequence: MaxTxInSequenceNum,
}
}
// TxOut defines a bitcoin transaction output.
type TxOut struct {
Value int64
PkScript []byte
}
// SerializeSize returns the number of bytes it would take to serialize the
// transaction output.
func (t *TxOut) SerializeSize() int {
// Value 8 bytes + serialized varint size for the length of PkScript +
// PkScript bytes.
return 8 + VarIntSerializeSize(uint64(len(t.PkScript))) + len(t.PkScript)
}
// NewTxOut returns a new bitcoin transaction output with the provided
// transaction value and public key script.
func NewTxOut(value int64, pkScript []byte) *TxOut {
return &TxOut{
Value: value,
PkScript: pkScript,
}
}
// MsgTx implements the Message interface and represents a bitcoin tx message.
// It is used to deliver transaction information in response to a getdata
// message (MsgGetData) for a given transaction.
//
// Use the AddTxIn and AddTxOut functions to build up the list of transaction
// inputs and outputs.
type MsgTx struct {
Version int32
TxIn []*TxIn
TxOut []*TxOut
LockTime uint32
}
// AddTxIn adds a transaction input to the message.
func (msg *MsgTx) AddTxIn(ti *TxIn) {
msg.TxIn = append(msg.TxIn, ti)
}
// AddTxOut adds a transaction output to the message.
func (msg *MsgTx) AddTxOut(to *TxOut) {
msg.TxOut = append(msg.TxOut, to)
}
// TxHash generates the Hash for the transaction.
func (msg *MsgTx) TxHash() chainhash.Hash {
// Encode the transaction and calculate double sha256 on the result.
// Ignore the error returns since the only way the encode could fail
// is being out of memory or due to nil pointers, both of which would
// cause a run-time panic.
buf := bytes.NewBuffer(make([]byte, 0, msg.SerializeSize()))
_ = msg.Serialize(buf)
return chainhash.DoubleHashH(buf.Bytes())
}
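// buildExampleTx is an illustrative sketch, not part of the original file:
// it strings the constructors together to build a one-input, one-output
// transaction and compute its hash. The script arguments are opaque bytes at
// this layer; the wire package performs no script validation.
func buildExampleTx(prevHash *chainhash.Hash, prevIndex uint32,
	sigScript, pkScript []byte, value int64) (*MsgTx, chainhash.Hash) {
	tx := NewMsgTx(TxVersion)
	tx.AddTxIn(NewTxIn(NewOutPoint(prevHash, prevIndex), sigScript))
	tx.AddTxOut(NewTxOut(value, pkScript))
	return tx, tx.TxHash()
}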
// Copy creates a deep copy of a transaction so that the original does not get
// modified when the copy is manipulated.
func (msg *MsgTx) Copy() *MsgTx {
// Create new tx and start by copying primitive values and making space
// for the transaction inputs and outputs.
newTx := MsgTx{
Version: msg.Version,
TxIn: make([]*TxIn, 0, len(msg.TxIn)),
TxOut: make([]*TxOut, 0, len(msg.TxOut)),
LockTime: msg.LockTime,
}
// Deep copy the old TxIn data.
for _, oldTxIn := range msg.TxIn {
// Deep copy the old previous outpoint.
oldOutPoint := oldTxIn.PreviousOutPoint
newOutPoint := OutPoint{}
newOutPoint.Hash.SetBytes(oldOutPoint.Hash[:])
newOutPoint.Index = oldOutPoint.Index
// Deep copy the old signature script.
var newScript []byte
oldScript := oldTxIn.SignatureScript
oldScriptLen := len(oldScript)
if oldScriptLen > 0 {
newScript = make([]byte, oldScriptLen)
copy(newScript, oldScript[:oldScriptLen])
}
// Create new txIn with the deep copied data and append it to
// new Tx.
newTxIn := TxIn{
PreviousOutPoint: newOutPoint,
SignatureScript: newScript,
Sequence: oldTxIn.Sequence,
}
newTx.TxIn = append(newTx.TxIn, &newTxIn)
}
// Deep copy the old TxOut data.
for _, oldTxOut := range msg.TxOut {
// Deep copy the old PkScript
var newScript []byte
oldScript := oldTxOut.PkScript
oldScriptLen := len(oldScript)
if oldScriptLen > 0 {
newScript = make([]byte, oldScriptLen)
copy(newScript, oldScript[:oldScriptLen])
}
// Create new txOut with the deep copied data and append it to
// new Tx.
newTxOut := TxOut{
Value: oldTxOut.Value,
PkScript: newScript,
}
newTx.TxOut = append(newTx.TxOut, &newTxOut)
}
return &newTx
}
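// stripCopiedScripts is an illustrative sketch, not part of the original
// file: it demonstrates why Copy performs a deep copy. Clearing the clone's
// signature scripts leaves the original transaction untouched because the
// underlying byte slices were duplicated rather than shared.
func stripCopiedScripts(tx *MsgTx) *MsgTx {
	clone := tx.Copy()
	for _, txIn := range clone.TxIn {
		txIn.SignatureScript = nil
	}
	return clone
}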
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding transactions stored to disk, such as in a
// database, as opposed to decoding transactions from the wire.
func (msg *MsgTx) BtcDecode(r io.Reader, pver uint32) error {
version, err := binarySerializer.Uint32(r, littleEndian)
if err != nil {
return err
}
msg.Version = int32(version)
count, err := ReadVarInt(r, pver)
if err != nil {
return err
}
// Prevent more transaction inputs than could possibly fit into a
// message. It would be possible to cause memory exhaustion and panics
// without a sane upper bound on this count.
if count > uint64(maxTxInPerMessage) {
str := fmt.Sprintf("too many input transactions to fit into "+
"max message size [count %d, max %d]", count,
maxTxInPerMessage)
return messageError("MsgTx.BtcDecode", str)
}
// returnScriptBuffers is a closure that returns any script buffers that
// were borrowed from the pool when there are any deserialization
// errors. It is only valid to call before the final step, which
// replaces the scripts with their locations in a contiguous buffer and
// returns the borrowed buffers to the pool.
returnScriptBuffers := func() {
for _, txIn := range msg.TxIn {
if txIn == nil || txIn.SignatureScript == nil {
continue
}
scriptPool.Return(txIn.SignatureScript)
}
for _, txOut := range msg.TxOut {
if txOut == nil || txOut.PkScript == nil {
continue
}
scriptPool.Return(txOut.PkScript)
}
}
// Deserialize the inputs.
var totalScriptSize uint64
txIns := make([]TxIn, count)
msg.TxIn = make([]*TxIn, count)
for i := uint64(0); i < count; i++ {
// The pointer is set now in case a script buffer is borrowed
// and needs to be returned to the pool on error.
ti := &txIns[i]
msg.TxIn[i] = ti
err = readTxIn(r, pver, msg.Version, ti)
if err != nil {
returnScriptBuffers()
return err
}
totalScriptSize += uint64(len(ti.SignatureScript))
}
count, err = ReadVarInt(r, pver)
if err != nil {
returnScriptBuffers()
return err
}
// Prevent more transaction outputs than could possibly fit into a
// message. It would be possible to cause memory exhaustion and panics
// without a sane upper bound on this count.
if count > uint64(maxTxOutPerMessage) {
returnScriptBuffers()
str := fmt.Sprintf("too many output transactions to fit into "+
"max message size [count %d, max %d]", count,
maxTxOutPerMessage)
return messageError("MsgTx.BtcDecode", str)
}
// Deserialize the outputs.
txOuts := make([]TxOut, count)
msg.TxOut = make([]*TxOut, count)
for i := uint64(0); i < count; i++ {
// The pointer is set now in case a script buffer is borrowed
// and needs to be returned to the pool on error.
to := &txOuts[i]
msg.TxOut[i] = to
err = readTxOut(r, pver, msg.Version, to)
if err != nil {
returnScriptBuffers()
return err
}
totalScriptSize += uint64(len(to.PkScript))
}
msg.LockTime, err = binarySerializer.Uint32(r, littleEndian)
if err != nil {
returnScriptBuffers()
return err
}
// Create a single allocation to house all of the scripts and set each
// input signature script and output public key script to the
// appropriate subslice of the overall contiguous buffer. Then, return
// each individual script buffer back to the pool so they can be reused
// for future deserializations. This is done because it significantly
// reduces the number of allocations the garbage collector needs to
// track, which in turn improves performance and drastically reduces the
// amount of runtime overhead that would otherwise be needed to keep
// track of millions of small allocations.
//
// NOTE: It is no longer valid to call the returnScriptBuffers closure
// after these blocks of code run because it is already done and the
// scripts in the transaction inputs and outputs no longer point to the
// buffers.
var offset uint64
scripts := make([]byte, totalScriptSize)
for i := 0; i < len(msg.TxIn); i++ {
// Copy the signature script into the contiguous buffer at the
// appropriate offset.
signatureScript := msg.TxIn[i].SignatureScript
copy(scripts[offset:], signatureScript)
// Reset the signature script of the transaction input to the
// slice of the contiguous buffer where the script lives.
scriptSize := uint64(len(signatureScript))
end := offset + scriptSize
msg.TxIn[i].SignatureScript = scripts[offset:end:end]
offset += scriptSize
// Return the temporary script buffer to the pool.
scriptPool.Return(signatureScript)
}
for i := 0; i < len(msg.TxOut); i++ {
// Copy the public key script into the contiguous buffer at the
// appropriate offset.
pkScript := msg.TxOut[i].PkScript
copy(scripts[offset:], pkScript)
// Reset the public key script of the transaction output to the
// slice of the contiguous buffer where the script lives.
scriptSize := uint64(len(pkScript))
end := offset + scriptSize
msg.TxOut[i].PkScript = scripts[offset:end:end]
offset += scriptSize
// Return the temporary script buffer to the pool.
scriptPool.Return(pkScript)
}
return nil
}
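// decodeWireTx is an illustrative sketch, not part of the original file: it
// shows how a raw tx payload received from a peer might be decoded.
// BtcDecode takes the negotiated protocol version, whereas Deserialize below
// always uses the stable long-term storage encoding.
func decodeWireTx(raw []byte, pver uint32) (*MsgTx, error) {
	var tx MsgTx
	if err := tx.BtcDecode(bytes.NewReader(raw), pver); err != nil {
		return nil, err
	}
	return &tx, nil
}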
// Deserialize decodes a transaction from r into the receiver using a format
// that is suitable for long-term storage such as a database while respecting
// the Version field in the transaction. This function differs from BtcDecode
// in that BtcDecode decodes from the bitcoin wire protocol as it was sent
// across the network. The wire encoding can technically differ depending on
// the protocol version and doesn't even really need to match the format of a
// stored transaction at all. As of the time this comment was written, the
// encoded transaction is the same in both instances, but there is a distinct
// difference and separating the two allows the API to be flexible enough to
// deal with changes.
func (msg *MsgTx) Deserialize(r io.Reader) error {
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of BtcDecode.
return msg.BtcDecode(r, 0)
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding transactions to be stored to disk, such as in a
// database, as opposed to encoding transactions for the wire.
func (msg *MsgTx) BtcEncode(w io.Writer, pver uint32) error {
err := binarySerializer.PutUint32(w, littleEndian, uint32(msg.Version))
if err != nil {
return err
}
count := uint64(len(msg.TxIn))
err = WriteVarInt(w, pver, count)
if err != nil {
return err
}
for _, ti := range msg.TxIn {
err = writeTxIn(w, pver, msg.Version, ti)
if err != nil {
return err
}
}
count = uint64(len(msg.TxOut))
err = WriteVarInt(w, pver, count)
if err != nil {
return err
}
for _, to := range msg.TxOut {
err = writeTxOut(w, pver, msg.Version, to)
if err != nil {
return err
}
}
return binarySerializer.PutUint32(w, littleEndian, msg.LockTime)
}
// Serialize encodes the transaction to w using a format that is suitable for
// long-term storage such as a database while respecting the Version field in
// the transaction. This function differs from BtcEncode in that BtcEncode
// encodes the transaction to the bitcoin wire protocol in order to be sent
// across the network. The wire encoding can technically differ depending on
// the protocol version and doesn't even really need to match the format of a
// stored transaction at all. As of the time this comment was written, the
// encoded transaction is the same in both instances, but there is a distinct
// difference and separating the two allows the API to be flexible enough to
// deal with changes.
func (msg *MsgTx) Serialize(w io.Writer) error {
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of BtcEncode.
return msg.BtcEncode(w, 0)
}
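// roundTripTx is an illustrative sketch, not part of the original file: it
// pairs Serialize with Deserialize, pre-sizing the buffer via SerializeSize
// the same way TxHash does, and decodes the bytes back into a fresh MsgTx.
func roundTripTx(tx *MsgTx) (*MsgTx, error) {
	buf := bytes.NewBuffer(make([]byte, 0, tx.SerializeSize()))
	if err := tx.Serialize(buf); err != nil {
		return nil, err
	}
	var decoded MsgTx
	if err := decoded.Deserialize(buf); err != nil {
		return nil, err
	}
	return &decoded, nil
}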
// SerializeSize returns the number of bytes it would take to serialize the
// transaction.
func (msg *MsgTx) SerializeSize() int {
// Version 4 bytes + LockTime 4 bytes + Serialized varint size for the
// number of transaction inputs and outputs.
n := 8 + VarIntSerializeSize(uint64(len(msg.TxIn))) +
VarIntSerializeSize(uint64(len(msg.TxOut)))
for _, txIn := range msg.TxIn {
n += txIn.SerializeSize()
}
for _, txOut := range msg.TxOut {
n += txOut.SerializeSize()
}
return n
}
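// estimateFee is an illustrative sketch, not part of the original file:
// besides pre-sizing buffers, SerializeSize is handy for fee estimation when
// the caller has a fee rate expressed in satoshis per serialized byte. The
// satPerByte parameter is hypothetical and not part of this package.
func estimateFee(tx *MsgTx, satPerByte int64) int64 {
	return satPerByte * int64(tx.SerializeSize())
}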
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgTx) Command() string {
return CmdTx
}
// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgTx) MaxPayloadLength(pver uint32) uint32 {
return MaxBlockPayload
}
// PkScriptLocs returns a slice containing the start of each public key script
// within the raw serialized transaction. The caller can easily obtain the
// length of each script by using len on the script available via the
// appropriate transaction output entry.
func (msg *MsgTx) PkScriptLocs() []int {
numTxOut := len(msg.TxOut)
if numTxOut == 0 {
return nil
}
// The starting offset in the serialized transaction of the first
// transaction output is:
//
// Version 4 bytes + serialized varint size for the number of
// transaction inputs and outputs + serialized size of each transaction
// input.
n := 4 + VarIntSerializeSize(uint64(len(msg.TxIn))) +
VarIntSerializeSize(uint64(numTxOut))
for _, txIn := range msg.TxIn {
n += txIn.SerializeSize()
}
// Calculate and set the appropriate offset for each public key script.
pkScriptLocs := make([]int, numTxOut)
for i, txOut := range msg.TxOut {
// The offset of the script in the transaction output is:
//
// Value 8 bytes + serialized varint size for the length of
// PkScript.
n += 8 + VarIntSerializeSize(uint64(len(txOut.PkScript)))
pkScriptLocs[i] = n
n += len(txOut.PkScript)
}
return pkScriptLocs
}
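// extractPkScripts is an illustrative sketch, not part of the original file:
// it shows how PkScriptLocs pairs with the serialized bytes. Each returned
// offset marks where an output's public key script begins, and the script's
// length comes from the corresponding TxOut entry.
func extractPkScripts(tx *MsgTx) ([][]byte, error) {
	var buf bytes.Buffer
	if err := tx.Serialize(&buf); err != nil {
		return nil, err
	}
	serialized := buf.Bytes()
	locs := tx.PkScriptLocs()
	scripts := make([][]byte, len(locs))
	for i, loc := range locs {
		scriptLen := len(tx.TxOut[i].PkScript)
		scripts[i] = serialized[loc : loc+scriptLen]
	}
	return scripts, nil
}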
// NewMsgTx returns a new bitcoin tx message that conforms to the Message
// interface. The returned instance has the provided version and no
// transaction inputs or outputs. Also, the lock time is set to zero to
// indicate the transaction is valid immediately as opposed to at some
// time in the future.
func NewMsgTx(version int32) *MsgTx {
return &MsgTx{
Version: version,
TxIn: make([]*TxIn, 0, defaultTxInOutAlloc),
TxOut: make([]*TxOut, 0, defaultTxInOutAlloc),
}
}
// readOutPoint reads the next sequence of bytes from r as an OutPoint.
func readOutPoint(r io.Reader, pver uint32, version int32, op *OutPoint) error {
_, err := io.ReadFull(r, op.Hash[:])
if err != nil {
return err
}
op.Index, err = binarySerializer.Uint32(r, littleEndian)
return err
}
// writeOutPoint encodes op to the bitcoin protocol encoding for an OutPoint
// to w.
func writeOutPoint(w io.Writer, pver uint32, version int32, op *OutPoint) error {
_, err := w.Write(op.Hash[:])
if err != nil {
return err
}
return binarySerializer.PutUint32(w, littleEndian, op.Index)
}
// readScript reads a variable length byte array that represents a transaction
// script. It is encoded as a varInt containing the length of the array
// followed by the bytes themselves. An error is returned if the length is
// greater than the passed maxAllowed parameter, which helps protect against
// memory exhaustion attacks and forced panics through malformed messages. The
// fieldName parameter is only used for the error message so it provides more
// context in the error.
func readScript(r io.Reader, pver uint32, maxAllowed uint32, fieldName string) ([]byte, error) {
count, err := ReadVarInt(r, pver)
if err != nil {
return nil, err
}
// Prevent byte array larger than the max message size. It would
// be possible to cause memory exhaustion and panics without a sane
// upper bound on this count.
if count > uint64(maxAllowed) {
str := fmt.Sprintf("%s is larger than the max allowed size "+
"[count %d, max %d]", fieldName, count, maxAllowed)
return nil, messageError("readScript", str)
}
b := scriptPool.Borrow(count)
_, err = io.ReadFull(r, b)
if err != nil {
scriptPool.Return(b)
return nil, err
}
return b, nil
}
// readTxIn reads the next sequence of bytes from r as a transaction input
// (TxIn).
func readTxIn(r io.Reader, pver uint32, version int32, ti *TxIn) error {
err := readOutPoint(r, pver, version, &ti.PreviousOutPoint)
if err != nil {
return err
}
ti.SignatureScript, err = readScript(r, pver, MaxMessagePayload,
"transaction input signature script")
if err != nil {
return err
}
return readElement(r, &ti.Sequence)
}
// writeTxIn encodes ti to the bitcoin protocol encoding for a transaction
// input (TxIn) to w.
func writeTxIn(w io.Writer, pver uint32, version int32, ti *TxIn) error {
err := writeOutPoint(w, pver, version, &ti.PreviousOutPoint)
if err != nil {
return err
}
err = WriteVarBytes(w, pver, ti.SignatureScript)
if err != nil {
return err
}
return binarySerializer.PutUint32(w, littleEndian, ti.Sequence)
}
// readTxOut reads the next sequence of bytes from r as a transaction output
// (TxOut).
func readTxOut(r io.Reader, pver uint32, version int32, to *TxOut) error {
err := readElement(r, &to.Value)
if err != nil {
return err
}
to.PkScript, err = readScript(r, pver, MaxMessagePayload,
"transaction output public key script")
return err
}
// writeTxOut encodes to into the bitcoin protocol encoding for a transaction
// output (TxOut) to w.
func writeTxOut(w io.Writer, pver uint32, version int32, to *TxOut) error {
err := binarySerializer.PutUint64(w, littleEndian, uint64(to.Value))
if err != nil {
return err
}
return WriteVarBytes(w, pver, to.PkScript)
}