Minor hashing-related optimizations.

This commit contains three classes of optimizations:
 - Reducing the number of unnecessary hash copies
 - Improving the performance of the DoubleSha256 function
 - Making a couple of minor optimizations to the ShaHash functions

The first class is a result of the Bytes function on a ShaHash making a
copy of the bytes before returning them.  It really should have been named
CloneBytes, but renaming it now would break the API.

To address this, a comment has been added to the function which explicitly
calls out the copy behavior.  In addition, all call sites of .Bytes on a
ShaHash in the code base have been updated to simply slice the array when
a copy is not needed.  This saves a significant amount of data copying.
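
To illustrate the difference, here is a minimal, self-contained sketch; the
ShaHash type below is a stand-in for the real wire.ShaHash, assuming only that
it is a 32-byte array as it is in the wire package:

package main

import "fmt"

// ShaHash is a stand-in for wire.ShaHash (a 32-byte array).
type ShaHash [32]byte

// Bytes mirrors the copying behavior described above: it allocates a new
// slice and copies the hash into it before returning.
func (hash *ShaHash) Bytes() []byte {
	newHash := make([]byte, len(hash))
	copy(newHash, hash[:])
	return newHash
}

func main() {
	var h ShaHash

	copied := h.Bytes() // allocates 32 bytes and copies them
	sliced := h[:]      // reuses the array's backing memory, no copy

	fmt.Println(len(copied), len(sliced))
}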

The second optimization modifies the DoubleSha256 function to directly use
fastsha256.Sum256 instead of the hasher interface.  This reduces the
number of allocations needed from three to one.  A benchmark for the function has been added
as well.

old: BenchmarkDoubleSha256  500000   3691 ns/op   192 B/op   3 allocs/op
new: BenchmarkDoubleSha256  500000   3081 ns/op    32 B/op   1 allocs/op
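
For reference, the shape of the change looks roughly like the sketch below,
written against the standard library's crypto/sha256 on the assumption that
fastsha256 exposes the same Sum256 signature:

package main

import (
	"crypto/sha256"
	"fmt"
)

// doubleSha256Old mirrors the previous approach: a hash.Hash instance plus
// two Sum(nil) calls, each of which allocates a fresh byte slice.
func doubleSha256Old(b []byte) []byte {
	hasher := sha256.New()
	hasher.Write(b)
	sum := hasher.Sum(nil)
	hasher.Reset()
	hasher.Write(sum)
	return hasher.Sum(nil)
}

// doubleSha256New mirrors the optimized approach: Sum256 returns a [32]byte
// array by value, so only the returned slice of the final digest escapes to
// the heap.
func doubleSha256New(b []byte) []byte {
	first := sha256.Sum256(b)
	second := sha256.Sum256(first[:])
	return second[:]
}

func main() {
	data := []byte("example input")
	fmt.Printf("old: %x\nnew: %x\n", doubleSha256Old(data), doubleSha256New(data))
}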

The final optimizations are to the ShaHash IsEqual and SetBytes functions,
which have been modified to take advantage of the fact the type is an array:
IsEqual now compares the arrays directly rather than going through
bytes.Equal, and SetBytes drops an unneeded subslice.
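
A standalone sketch of the two changed methods, again on a stand-in 32-byte
array type rather than the real wire.ShaHash:

package main

import "fmt"

// HashSize is the array length assumed for the stand-in type.
const HashSize = 32

// ShaHash is a stand-in for wire.ShaHash.
type ShaHash [HashSize]byte

// IsEqual compares the arrays directly.  Go arrays are comparable with ==,
// so no slice headers or bytes.Equal call are needed.
func (hash *ShaHash) IsEqual(target *ShaHash) bool {
	return *hash == *target
}

// SetBytes copies the caller's bytes into the array.  The length is checked
// first, so no [0:HashSize] subslice of newHash is needed in the copy.
func (hash *ShaHash) SetBytes(newHash []byte) error {
	if len(newHash) != HashSize {
		return fmt.Errorf("invalid sha length of %v, want %v", len(newHash), HashSize)
	}
	copy(hash[:], newHash)
	return nil
}

func main() {
	var a, b ShaHash
	if err := a.SetBytes(make([]byte, HashSize)); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(a.IsEqual(&b)) // true: both are all zeros
}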
Dave Collins 2015-04-04 13:25:49 -05:00
parent c80c8e7fe9
commit f5cdf2d6a8
9 changed files with 48 additions and 31 deletions


@@ -57,15 +57,14 @@ var (
 // perform math comparisons.
 func ShaHashToBig(hash *wire.ShaHash) *big.Int {
 	// A ShaHash is in little-endian, but the big package wants the bytes
-	// in big-endian.  Reverse them.  ShaHash.Bytes makes a copy, so it
-	// is safe to modify the returned buffer.
-	buf := hash.Bytes()
+	// in big-endian, so reverse them.
+	buf := *hash
 	blen := len(buf)
 	for i := 0; i < blen/2; i++ {
 		buf[i], buf[blen-1-i] = buf[blen-1-i], buf[i]
 	}
 
-	return new(big.Int).SetBytes(buf)
+	return new(big.Int).SetBytes(buf[:])
 }
 
 // CompactToBig converts a compact representation of a whole number N to an


@@ -31,8 +31,8 @@ func nextPowerOfTwo(n int) int {
 func HashMerkleBranches(left *wire.ShaHash, right *wire.ShaHash) *wire.ShaHash {
 	// Concatenate the left and right nodes.
 	var sha [wire.HashSize * 2]byte
-	copy(sha[:wire.HashSize], left.Bytes())
-	copy(sha[wire.HashSize:], right.Bytes())
+	copy(sha[:wire.HashSize], left[:])
+	copy(sha[wire.HashSize:], right[:])
 
 	// Create a new sha hash from the double sha 256.  Ignore the error
 	// here since SetBytes can't fail here due to the fact DoubleSha256


@@ -134,10 +134,9 @@ func (db *LevelDb) setBlk(sha *wire.ShaHash, blkHeight int64, buf []byte) {
 	shaKey := shaBlkToKey(sha)
 	blkKey := int64ToKey(blkHeight)
 
-	shaB := sha.Bytes()
-	blkVal := make([]byte, len(shaB)+len(buf))
-	copy(blkVal[0:], shaB)
-	copy(blkVal[len(shaB):], buf)
+	blkVal := make([]byte, len(sha)+len(buf))
+	copy(blkVal[0:], sha[:])
+	copy(blkVal[len(sha):], buf)
 
 	db.lBatch().Put(shaKey, lw[:])
 	db.lBatch().Put(blkKey, blkVal)


@@ -641,8 +641,7 @@ func int64ToKey(keyint int64) []byte {
 }
 
 func shaBlkToKey(sha *wire.ShaHash) []byte {
-	shaB := sha.Bytes()
-	return shaB
+	return sha[:]
 }
 
 // These are used here and in tx.go's deleteOldAddrIndex() to prevent deletion
@@ -651,15 +650,17 @@ var recordSuffixTx = []byte{'t', 'x'}
 var recordSuffixSpentTx = []byte{'s', 'x'}
 
 func shaTxToKey(sha *wire.ShaHash) []byte {
-	shaB := sha.Bytes()
-	shaB = append(shaB, recordSuffixTx...)
-	return shaB
+	key := make([]byte, len(sha)+len(recordSuffixTx))
+	copy(key, sha[:])
+	copy(key[len(sha):], recordSuffixTx)
+	return key
 }
 
 func shaSpentTxToKey(sha *wire.ShaHash) []byte {
-	shaB := sha.Bytes()
-	shaB = append(shaB, recordSuffixSpentTx...)
-	return shaB
+	key := make([]byte, len(sha)+len(recordSuffixSpentTx))
+	copy(key, sha[:])
+	copy(key[len(sha):], recordSuffixSpentTx)
+	return key
 }
 
 func (db *LevelDb) lBatch() *leveldb.Batch {


@@ -537,7 +537,7 @@ func (db *LevelDb) UpdateAddrIndexForBlock(blkSha *wire.ShaHash, blkHeight int64
 
 	// Update tip of addrindex.
 	newIndexTip := make([]byte, 40, 40)
-	copy(newIndexTip[0:32], blkSha.Bytes())
+	copy(newIndexTip[0:32], blkSha[:])
 	binary.LittleEndian.PutUint64(newIndexTip[32:40], uint64(blkHeight))
 	batch.Put(addrIndexMetaDataKey, newIndexTip)


@@ -5,7 +5,6 @@
 package main
 
 import (
-	"bytes"
 	"container/list"
 	"crypto/rand"
 	"encoding/binary"
@@ -215,13 +214,15 @@ func (s *server) handleUpdatePeerHeights(state *peerState, umsg updatePeerHeight
 			return
 		}
 
-		latestBlkSha := p.lastAnnouncedBlock.Bytes()
+		// This is a pointer to the underlying memory which doesn't
+		// change.
+		latestBlkSha := p.lastAnnouncedBlock
 		p.StatsMtx.Unlock()
 
 		// If the peer has recently announced a block, and this block
 		// matches our newly accepted block, then update their block
 		// height.
-		if bytes.Equal(latestBlkSha, umsg.newSha.Bytes()) {
+		if *latestBlkSha == *umsg.newSha {
 			p.UpdateLastBlockHeight(umsg.newHeight)
 			p.UpdateLastAnnouncedBlock(nil)
 		}


@@ -392,3 +392,20 @@ func BenchmarkTxSha(b *testing.B) {
 		genesisCoinbaseTx.TxSha()
 	}
 }
+
+// BenchmarkDoubleSha256 performs a benchmark on how long it takes to perform a
+// double sha 256.
+func BenchmarkDoubleSha256(b *testing.B) {
+	b.StopTimer()
+	var buf bytes.Buffer
+	if err := genesisCoinbaseTx.Serialize(&buf); err != nil {
+		b.Errorf("Serialize: unexpected error: %v", err)
+		return
+	}
+	txBytes := buf.Bytes()
+	b.StartTimer()
+
+	for i := 0; i < b.N; i++ {
+		_ = DoubleSha256(txBytes)
+	}
+}


@@ -522,10 +522,7 @@ func RandomUint64() (uint64, error) {
 
 // DoubleSha256 calculates sha256(sha256(b)) and returns the resulting bytes.
 func DoubleSha256(b []byte) []byte {
-	hasher := fastsha256.New()
-	hasher.Write(b)
-	sum := hasher.Sum(nil)
-	hasher.Reset()
-	hasher.Write(sum)
-	return hasher.Sum(nil)
+	first := fastsha256.Sum256(b)
+	second := fastsha256.Sum256(first[:])
+	return second[:]
 }


@@ -5,7 +5,6 @@
 package wire
 
 import (
-	"bytes"
 	"encoding/hex"
 	"fmt"
 )
@@ -34,6 +33,10 @@ func (hash ShaHash) String() string {
 }
 
 // Bytes returns the bytes which represent the hash as a byte slice.
+//
+// NOTE: This makes a copy of the bytes and should have probably been named
+// CloneBytes.  It is generally cheaper to just slice the hash directly thereby
+// reusing the same bytes rather than calling this method.
 func (hash *ShaHash) Bytes() []byte {
 	newHash := make([]byte, HashSize)
 	copy(newHash, hash[:])
@@ -49,14 +52,14 @@ func (hash *ShaHash) SetBytes(newHash []byte) error {
 		return fmt.Errorf("invalid sha length of %v, want %v", nhlen,
 			HashSize)
 	}
-	copy(hash[:], newHash[0:HashSize])
+	copy(hash[:], newHash)
 
 	return nil
 }
 
 // IsEqual returns true if target is the same as hash.
 func (hash *ShaHash) IsEqual(target *ShaHash) bool {
-	return bytes.Equal(hash[:], target[:])
+	return *hash == *target
 }
 
 // NewShaHash returns a new ShaHash from a byte slice.  An error is returned if