gcs: add some line spacing, wrap comments to 80 characters

Olaoluwa Osuntokun 2017-04-27 20:34:50 -07:00
parent e3c79234e6
commit 0f2eb80fdb
5 changed files with 96 additions and 60 deletions


@@ -55,8 +55,8 @@ func DeriveKey(keyHash *chainhash.Hash) [gcs.KeySize]byte {
}
// OutPointToFilterEntry is a utility function that derives a filter entry from
// a wire.OutPoint in a standardized way for use with both building and querying
// filters.
// a wire.OutPoint in a standardized way for use with both building and
// querying filters.
func OutPointToFilterEntry(outpoint wire.OutPoint) []byte {
// Size of the hash plus size of int32 index
data := make([]byte, chainhash.HashSize+4)
@@ -118,7 +118,7 @@ func (b *GCSBuilder) SetP(p uint8) *GCSBuilder {
// Preallocate sets the estimated filter size after calling Builder() to reduce
// the probability of memory reallocations. If the builder has already had data
// added to it, SetN has no effect.
// added to it, Preallocate has no effect.
func (b *GCSBuilder) Preallocate(n uint32) *GCSBuilder {
// Do nothing if the builder's already errored out.
if b.err != nil {
@@ -128,6 +128,7 @@ func (b *GCSBuilder) Preallocate(n uint32) *GCSBuilder {
if len(b.data) == 0 {
b.data = make([][]byte, 0, n)
}
return b
}
@@ -157,8 +158,8 @@ func (b *GCSBuilder) AddEntries(data [][]byte) *GCSBuilder {
return b
}
// AddOutPoint adds a wire.OutPoint to the list of entries to be included in the
// GCS filter when it's built.
// AddOutPoint adds a wire.OutPoint to the list of entries to be included in
// the GCS filter when it's built.
func (b *GCSBuilder) AddOutPoint(outpoint wire.OutPoint) *GCSBuilder {
// Do nothing if the builder's already errored out.
if b.err != nil {
@@ -181,7 +182,7 @@ func (b *GCSBuilder) AddHash(hash *chainhash.Hash) *GCSBuilder {
// AddScript adds all the data pushed in the script serialized as the passed
// []byte to the list of entries to be included in the GCS filter when it's
// built. T
// built.
func (b *GCSBuilder) AddScript(script []byte) *GCSBuilder {
// Do nothing if the builder's already errored out.
if b.err != nil {
@@ -204,16 +205,16 @@ func (b *GCSBuilder) Build() (*gcs.Filter, error) {
return gcs.BuildGCSFilter(b.p, b.key, b.data)
}
// WithKeyPN creates a GCSBuilder with specified key and the passed
// probability and estimated filter size.
// WithKeyPN creates a GCSBuilder with specified key and the passed probability
// and estimated filter size.
func WithKeyPN(key [gcs.KeySize]byte, p uint8, n uint32) *GCSBuilder {
b := GCSBuilder{}
return b.SetKey(key).SetP(p).Preallocate(n)
}
// WithKeyP creates a GCSBuilder with specified key and the passed
// probability. Estimated filter size is set to zero, which means more
// reallocations are done when building the filter.
// WithKeyP creates a GCSBuilder with specified key and the passed probability.
// Estimated filter size is set to zero, which means more reallocations are
// done when building the filter.
func WithKeyP(key [gcs.KeySize]byte, p uint8) *GCSBuilder {
return WithKeyPN(key, p, 0)
}
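
The constructors above pair a SipHash key with the collision parameter P and the estimated entry count N, and every builder method returns the builder so calls chain. A minimal sketch of that flow (the import path and the entries added are illustrative, not taken from this commit):

```go
package main

import (
	"fmt"
	"log"

	"github.com/btcsuite/btcutil/gcs/builder" // import path is illustrative
)

func main() {
	// A fresh random SipHash key; a fixed key would be used wherever
	// deterministic filter generation is required.
	key, err := builder.RandomKey()
	if err != nil {
		log.Fatal(err)
	}

	// DefaultP (20) gives a 2^-20 collision probability, and N=2
	// preallocates room for the two entries added below.
	b := builder.WithKeyPN(key, builder.DefaultP, 2)

	// Builder methods chain; Build produces the immutable gcs.Filter.
	filter, err := b.AddEntries([][]byte{
		[]byte("alpha"),
		[]byte("bravo"),
	}).Build()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("filter holds", filter.N(), "entries")
}
```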
@@ -246,8 +247,8 @@ func WithKeyHash(keyHash *chainhash.Hash) *GCSBuilder {
return WithKeyHashPN(keyHash, DefaultP, 0)
}
// WithRandomKeyPN creates a GCSBuilder with a cryptographically random
// key and the passed probability and estimated filter size.
// WithRandomKeyPN creates a GCSBuilder with a cryptographically random key and
// the passed probability and estimated filter size.
func WithRandomKeyPN(p uint8, n uint32) *GCSBuilder {
key, err := RandomKey()
if err != nil {
@@ -257,44 +258,63 @@ func WithRandomKeyPN(p uint8, n uint32) *GCSBuilder {
return WithKeyPN(key, p, n)
}
// WithRandomKeyP creates a GCSBuilder with a cryptographically random
// key and the passed probability. Estimated filter size is set to zero, which
// means more reallocations are done when building the filter.
// WithRandomKeyP creates a GCSBuilder with a cryptographically random key and
// the passed probability. Estimated filter size is set to zero, which means
// more reallocations are done when building the filter.
func WithRandomKeyP(p uint8) *GCSBuilder {
return WithRandomKeyPN(p, 0)
}
// WithRandomKey creates a GCSBuilder with a cryptographically random
// key. Probability is set to 20 (2^-20 collision probability). Estimated
// filter size is set to zero, which means more reallocations are done when
// WithRandomKey creates a GCSBuilder with a cryptographically random key.
// Probability is set to 20 (2^-20 collision probability). Estimated filter
// size is set to zero, which means more reallocations are done when
// building the filter.
func WithRandomKey() *GCSBuilder {
return WithRandomKeyPN(DefaultP, 0)
}
// BuildBasicFilter builds a basic GCS filter from a block.
// BuildBasicFilter builds a basic GCS filter from a block. A basic GCS filter
// will contain all the previous outpoints spent within a block, as well as the
// data pushes within all the outputs created within a block.
func BuildBasicFilter(block *wire.MsgBlock) (*gcs.Filter, error) {
blockHash := block.BlockHash()
b := WithKeyHash(&blockHash)
// If the filter had an issue with the specified key, then we force it
// to bubble up here by calling the Key() function.
_, err := b.Key()
if err != nil {
return nil, err
}
// In order to build a basic filter, we'll range over the entire block,
// adding the outpoint data as well as the data pushes within the
// pkScript.
for i, tx := range block.Transactions {
// Skip the inputs for the coinbase transaction
if i != 0 {
// For each txin, we'll add a serialized version of
// the txid:index to the filters data slices.
for _, txIn := range tx.TxIn {
b.AddOutPoint(txIn.PreviousOutPoint)
}
}
// For each output in a transaction, we'll add each of the
// individual data pushes within the script.
for _, txOut := range tx.TxOut {
b.AddScript(txOut.PkScript)
}
}
return b.Build()
}
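
BuildBasicFilter is the producer side; a light client queries the result with the same block-derived key and the same entry encoding shown earlier (DeriveKey, OutPointToFilterEntry). A hedged sketch of checking a watched outpoint against a block's basic filter (import paths are illustrative):

```go
package lightclient

import (
	"github.com/btcsuite/btcd/wire"           // import path is illustrative
	"github.com/btcsuite/btcutil/gcs/builder" // import path is illustrative
)

// watchedOutPointSpent reports whether a block's basic filter (probably)
// contains a spend of the given outpoint. A positive match is only
// probabilistic and still needs to be confirmed against the full block.
func watchedOutPointSpent(block *wire.MsgBlock, op wire.OutPoint) (bool, error) {
	basicFilter, err := builder.BuildBasicFilter(block)
	if err != nil {
		return false, err
	}

	// The filter key is derived from the block hash, so every node
	// arrives at the same key without it ever being transmitted.
	blockHash := block.BlockHash()
	key := builder.DeriveKey(&blockHash)

	// Encode the outpoint exactly the way the builder did when it was
	// inserted into the filter.
	return basicFilter.Match(key, builder.OutPointToFilterEntry(op))
}
```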
// BuildExtFilter builds an extended GCS filter from a block.
// BuildExtFilter builds an extended GCS filter from a block. An extended
// filter supplements a regular basic filter by including all the _witness_ data
// found within a block. This includes all the data pushes within any signature
// scripts as well as each element of an input's witness stack. Additionally,
// the _hashes_ of each transaction are also inserted into the filter.
func BuildExtFilter(block *wire.MsgBlock) (*gcs.Filter, error) {
blockHash := block.BlockHash()
b := WithKeyHash(&blockHash)
@@ -312,6 +332,7 @@ func BuildExtFilter(block *wire.MsgBlock) (*gcs.Filter, error) {
}
}
}
return b.Build()
}
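
Since the extended filter also commits to each transaction's hash, a txid lookup works the same way as the outpoint lookup above. The sketch below assumes AddHash inserts the raw 32 hash bytes, so the query uses that same encoding:

```go
package lightclient

import (
	"github.com/btcsuite/btcd/chaincfg/chainhash" // import path is illustrative
	"github.com/btcsuite/btcd/wire"               // import path is illustrative
	"github.com/btcsuite/btcutil/gcs/builder"     // import path is illustrative
)

// txLikelyInBlock checks a block's extended filter for a transaction hash.
// The extended filter also carries sigScript and witness pushes, but the
// transaction hashes it contains are enough for this lookup.
func txLikelyInBlock(block *wire.MsgBlock, txid *chainhash.Hash) (bool, error) {
	extFilter, err := builder.BuildExtFilter(block)
	if err != nil {
		return false, err
	}

	blockHash := block.BlockHash()
	key := builder.DeriveKey(&blockHash)

	// Query with the raw hash bytes, mirroring how AddHash is assumed
	// to have inserted each transaction hash.
	return extFilter.Match(key, txid[:])
}
```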
@@ -326,8 +347,14 @@ func GetFilterHash(filter *gcs.Filter) chainhash.Hash {
func MakeHeaderForFilter(filter *gcs.Filter, prevHeader chainhash.Hash) chainhash.Hash {
filterTip := make([]byte, 2*chainhash.HashSize)
filterHash := GetFilterHash(filter)
// In the buffer we created above we'll compute hash || prevHash as an
// intermediate value.
copy(filterTip, filterHash[:])
copy(filterTip[chainhash.HashSize:], prevHeader[:])
// The final filter header is the double-sha256 of the hash computed
// above.
hash1 := chainhash.HashH(filterTip)
return chainhash.HashH(hash1[:])
}
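
MakeHeaderForFilter turns per-block filters into a header chain in the same spirit as block headers: each filter header commits to the current filter's hash and the previous filter header via double-SHA256(filterHash || prevHeader). A sketch of chaining several filters, assuming an all-zero previous header at the start (that starting point is an assumption of this sketch, not something the package dictates):

```go
package lightclient

import (
	"github.com/btcsuite/btcd/chaincfg/chainhash" // import path is illustrative
	"github.com/btcsuite/btcutil/gcs"             // import path is illustrative
	"github.com/btcsuite/btcutil/gcs/builder"     // import path is illustrative
)

// chainFilterHeaders links a sequence of per-block filters into a header
// chain, where each header is MakeHeaderForFilter(filter, previous header).
func chainFilterHeaders(filters []*gcs.Filter) []chainhash.Hash {
	headers := make([]chainhash.Hash, len(filters))

	var prev chainhash.Hash // zero-valued hash for the first filter
	for i, f := range filters {
		headers[i] = builder.MakeHeaderForFilter(f, prev)
		prev = headers[i]
	}

	return headers
}
```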


@@ -35,14 +35,13 @@ const (
KeySize = 16
)
// Filter describes an immutable filter that can be built from
// a set of data elements, serialized, deserialized, and queried
// in a thread-safe manner. The serialized form is compressed as
// a Golomb Coded Set (GCS), but does not include N or P to allow
// the user to encode the metadata separately if necessary. The
// hash function used is SipHash, a keyed function; the key used
// in building the filter is required in order to match filter
// values and is not included in the serialized form.
// Filter describes an immutable filter that can be built from a set of data
// elements, serialized, deserialized, and queried in a thread-safe manner. The
// serialized form is compressed as a Golomb Coded Set (GCS), but does not
// include N or P to allow the user to encode the metadata separately if
// necessary. The hash function used is SipHash, a keyed function; the key used
// in building the filter is required in order to match filter values and is
// not included in the serialized form.
type Filter struct {
n uint32
p uint8
@@ -54,9 +53,7 @@ type Filter struct {
// BuildGCSFilter builds a new GCS filter with the collision probability of
// `1/(2**P)`, key `key`, and including every `[]byte` in `data` as a member of
// the set.
func BuildGCSFilter(P uint8, key [KeySize]byte,
data [][]byte) (*Filter, error) {
func BuildGCSFilter(P uint8, key [KeySize]byte, data [][]byte) (*Filter, error) {
// Some initial parameter checks: make sure we have data from which to
// build the filter, and make sure our parameters will fit the hash
// function we're using.
@@ -97,10 +94,12 @@ func BuildGCSFilter(P uint8, key [KeySize]byte,
// Calculate the difference between this value and the last,
// modulo P.
remainder = (v - lastValue) % f.modulusP
// Calculate the difference between this value and the last,
// divided by P.
value = (v - lastValue - remainder) / f.modulusP
lastValue = v
// Write the P multiple into the bitstream in unary; the
// average should be around 1 (2 bits - 0b10).
for value > 0 {
@@ -108,6 +107,7 @@ func BuildGCSFilter(P uint8, key [KeySize]byte,
value--
}
b.WriteBit(false)
// Write the remainder as a big-endian integer with enough bits
// to represent the appropriate collision probability.
b.WriteBits(remainder, int(f.p))
@@ -115,11 +115,12 @@ func BuildGCSFilter(P uint8, key [KeySize]byte,
// Copy the bitstream into the filter object and return the object.
f.filterData = b.Bytes()
return &f, nil
}
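
The loop above is plain Golomb-Rice coding: each sorted hash value is stored as the delta from its predecessor, split into a quotient written in unary and a P-bit big-endian remainder. A self-contained illustration of that split, separate from the package's internal bstream writer and only rendering the bits as a string:

```go
package main

import "fmt"

// golombRiceBits renders the Golomb-Rice encoding of a single delta as a
// printable bit string: the quotient in unary (q ones followed by a zero),
// then the remainder as a big-endian p-bit integer.
func golombRiceBits(delta uint64, p uint8) string {
	modulus := uint64(1) << p
	quotient := delta / modulus
	remainder := delta % modulus

	bits := ""
	for i := uint64(0); i < quotient; i++ {
		bits += "1"
	}
	bits += "0"

	for i := int(p) - 1; i >= 0; i-- {
		if remainder&(1<<uint(i)) != 0 {
			bits += "1"
		} else {
			bits += "0"
		}
	}

	return bits
}

func main() {
	// With p = 4 the modulus is 16, so a delta of 37 = 2*16 + 5 encodes
	// as "110" (quotient 2 in unary) followed by "0101" (remainder 5).
	fmt.Println(golombRiceBits(37, 4)) // prints 1100101
}
```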
// FromBytes deserializes a GCS filter from a known N, P, and serialized
// filter as returned by Bytes().
// FromBytes deserializes a GCS filter from a known N, P, and serialized filter
// as returned by Bytes().
func FromBytes(N uint32, P uint8, d []byte) (*Filter, error) {
// Basic sanity check.
@@ -138,6 +139,7 @@ func FromBytes(N uint32, P uint8, d []byte) (*Filter, error) {
// Copy the filter.
f.filterData = make([]byte, len(d))
copy(f.filterData, d)
return f, nil
}
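
FromBytes is the receiving side of the serialization contract: only the compressed bit stream goes over the wire, while N, P, and the SipHash key travel out of band (in practice the key is derived from the block hash). A small sketch of rebuilding a received filter and querying it (package path is illustrative):

```go
package lightclient

import (
	"github.com/btcsuite/btcutil/gcs" // import path is illustrative
)

// rebuildAndQuery reconstructs a filter that arrived over the wire as raw
// bytes plus out-of-band metadata (N, P, and the SipHash key), then checks
// it for a single entry.
func rebuildAndQuery(n uint32, p uint8, serialized []byte,
	key [gcs.KeySize]byte, entry []byte) (bool, error) {

	f, err := gcs.FromBytes(n, p, serialized)
	if err != nil {
		return false, err
	}

	return f.Match(key, entry)
}
```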
@@ -206,8 +208,8 @@ func (f *Filter) N() uint32 {
return f.n
}
// Match checks whether a []byte value is likely (within collision
// probability) to be a member of the set represented by the filter.
// Match checks whether a []byte value is likely (within collision probability)
// to be a member of the set represented by the filter.
func (f *Filter) Match(key [KeySize]byte, data []byte) (bool, error) {
// Create a filter bitstream.
@@ -220,8 +222,9 @@ func (f *Filter) Match(key [KeySize]byte, data []byte) (bool, error) {
// Go through the search filter and look for the desired value.
var lastValue uint64
for lastValue < term {
// Read the difference between previous and new value
// from bitstream.
// Read the difference between previous and new value from
// bitstream.
value, err := f.readFullUint64(b)
if err != nil {
if err == io.EOF {
@@ -229,19 +232,22 @@ func (f *Filter) Match(key [KeySize]byte, data []byte) (bool, error) {
}
return false, err
}
// Add the previous value to it.
value += lastValue
if value == term {
return true, nil
}
lastValue = value
}
return false, nil
}
// MatchAny checks whether any []byte value is likely (within
// collision probability) to be a member of the set represented by the
// filter faster than calling Match() for each value individually.
// MatchAny checks whether any []byte value is likely (within collision
// probability) to be a member of the set represented by the filter faster than
// calling Match() for each value individually.
func (f *Filter) MatchAny(key [KeySize]byte, data [][]byte) (bool, error) {
// Basic sanity check.
@@ -262,7 +268,8 @@ func (f *Filter) MatchAny(key [KeySize]byte, data [][]byte) (bool, error) {
sort.Sort(values)
// Zip down the filters, comparing values until we either run out of
// values to compare in one of the filters or we reach a matching value.
// values to compare in one of the filters or we reach a matching
// value.
var lastValue1, lastValue2 uint64
lastValue2 = values[0]
i := 1
@@ -292,13 +299,14 @@ func (f *Filter) MatchAny(key [KeySize]byte, data [][]byte) (bool, error) {
lastValue1 += value
}
}
// If we've made it this far, an element matched between filters so
// we return true.
// If we've made it this far, an element matched between filters so we
// return true.
return true, nil
}
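
MatchAny sorts the query values once and then walks the decompressed filter and the sorted queries in a single merge pass, which is why it beats calling Match in a loop for batches. A hedged sketch of the batch path for a wallet's watched outpoints (helper name and import paths are illustrative):

```go
package lightclient

import (
	"github.com/btcsuite/btcd/wire"           // import path is illustrative
	"github.com/btcsuite/btcutil/gcs"         // import path is illustrative
	"github.com/btcsuite/btcutil/gcs/builder" // import path is illustrative
)

// anyWatchedSpend batches a set of watched outpoints into a single MatchAny
// call rather than looping over Match, so the filter's bit stream is only
// walked once for the whole batch.
func anyWatchedSpend(f *gcs.Filter, key [gcs.KeySize]byte,
	watched []wire.OutPoint) (bool, error) {

	entries := make([][]byte, 0, len(watched))
	for _, op := range watched {
		// Entries must be encoded the same way the builder encoded
		// them when the filter was constructed.
		entries = append(entries, builder.OutPointToFilterEntry(op))
	}

	return f.MatchAny(key, entries)
}
```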
// readFullUint64 reads a value represented by the sum of a unary multiple
// of the filter's P modulus (`2**P`) and a big-endian P-bit remainder.
// readFullUint64 reads a value represented by the sum of a unary multiple of
// the filter's P modulus (`2**P`) and a big-endian P-bit remainder.
func (f *Filter) readFullUint64(b *bstream.BStream) (uint64, error) {
var v uint64


@@ -22,8 +22,8 @@ var (
P = uint8(20)
// Filters are conserved between tests but we must define with an
// interface which functions we're testing because the gcsFilter
// type isn't exported
// interface which functions we're testing because the gcsFilter type
// isn't exported
filter, filter2, filter3, filter4, filter5 *gcs.Filter
// We need to use the same key for building and querying the filters
@@ -73,8 +73,8 @@ var (
)
// TestGCSFilterBuild builds a test filter with a randomized key. For Bitcoin
// use, deterministic filter generation is desired. Therefore, a
// key that's derived deterministically would be required.
// use, deterministic filter generation is desired. Therefore, a key that's
// derived deterministically would be required.
func TestGCSFilterBuild(t *testing.T) {
for i := 0; i < gcs.KeySize; i += 4 {
binary.BigEndian.PutUint32(key[i:], rand.Uint32())
@@ -105,8 +105,8 @@ func TestGCSFilterCopy(t *testing.T) {
}
}
// TestGCSFilterMetadata checks that the filter metadata is built and
// copied correctly.
// TestGCSFilterMetadata checks that the filter metadata is built and copied
// correctly.
func TestGCSFilterMetadata(t *testing.T) {
if filter.P() != P {
t.Fatal("P not correctly stored in filter metadata")
@@ -213,8 +213,8 @@ func TestGCSFilterMatch(t *testing.T) {
}
}
// TestGCSFilterMatchAny checks that both the built and copied filters match
// a list correctly, logging any false positives without failing on them.
// TestGCSFilterMatchAny checks that both the built and copied filters match a
// list correctly, logging any false positives without failing on them.
func TestGCSFilterMatchAny(t *testing.T) {
match, err := filter.MatchAny(key, contents2)
if err != nil {


@@ -41,7 +41,8 @@ func BenchmarkGCSFilterMatch(b *testing.B) {
}
}
// BenchmarkGCSFilterMatchAny benchmarks querying a filter for a list of values.
// BenchmarkGCSFilterMatchAny benchmarks querying a filter for a list of
// values.
func BenchmarkGCSFilterMatchAny(b *testing.B) {
for i := 0; i < b.N; i++ {
filter.MatchAny(key, contents2)


@@ -5,8 +5,8 @@
package gcs
// uint64slice is a package-local utility class that allows us to use Go's
// sort package to sort a []uint64 by implementing sort.Interface.
// uint64slice is a package-local utility class that allows us to use Go's sort
// package to sort a []uint64 by implementing sort.Interface.
type uint64Slice []uint64
// Len returns the length of the slice.
@@ -14,8 +14,8 @@ func (p uint64Slice) Len() int {
return len(p)
}
// Less returns true when the ith element is smaller than the jth element
// of the slice, and returns false otherwise.
// Less returns true when the ith element is smaller than the jth element of
// the slice, and returns false otherwise.
func (p uint64Slice) Less(i, j int) bool {
return p[i] < p[j]
}
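
These methods make uint64Slice satisfy sort.Interface, which is all MatchAny needs in order to sort its query hashes before the merge pass. A trivial in-package sketch of that use (the helper name is hypothetical; outside this package a plain []uint64 with sort.Slice would do the same job):

```go
package gcs

import "sort"

// sortQueryValues is a hypothetical helper mirroring what MatchAny does with
// its query hashes: wrap the raw values in uint64Slice and hand them to the
// standard library's sort package.
func sortQueryValues(values []uint64) {
	sort.Sort(uint64Slice(values))
}
```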