gcs: pre-allocate capacity of slice for uncompressed filter values

This change reduces the total number of allocations required to
create/store the uncompressed filter values, as we'll now perform a
_single_ allocation rather than one each time the dynamically sized
slice reaches capacity.
Olaoluwa Osuntokun 2017-04-27 20:36:12 -07:00
parent 0f2eb80fdb
commit c59d7e8fba


@@ -76,7 +76,7 @@ func BuildGCSFilter(P uint8, key [KeySize]byte, data [][]byte) (*Filter, error)
 	f.modulusNP = uint64(f.n) * f.modulusP
 
 	// Build the filter.
-	var values uint64Slice
+	values := make(uint64Slice, 0, len(data))
 	b := bstream.NewBStreamWriter(0)
 
 	// Insert the hash (modulo N*P) of each data element into a slice and
@@ -260,7 +260,7 @@ func (f *Filter) MatchAny(key [KeySize]byte, data [][]byte) (bool, error) {
 	b := bstream.NewBStreamReader(filterData)
 
 	// Create an uncompressed filter of the search values.
-	var values uint64Slice
+	values := make(uint64Slice, 0, len(data))
 	for _, d := range data {
 		v := siphash.Sum64(d, &key) % f.modulusNP
 		values = append(values, v)
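
To illustrate the allocation behavior this change avoids: appending to a nil
slice forces the Go runtime to allocate a larger backing array (and copy the
old contents) every time the slice reaches capacity, while pre-sizing with
make performs one allocation up front. The following is a minimal standalone
sketch, not part of this commit; the uint64Slice type is redeclared here and
the element count of 1000 is an arbitrary stand-in for len(data).

package main

import "fmt"

// uint64Slice mirrors the named slice type used by the gcs package.
type uint64Slice []uint64

func main() {
	data := make([][]byte, 1000) // stand-in for the filter's input elements

	// Before: a nil slice starts with zero capacity, so append must
	// allocate a progressively larger backing array each time the
	// slice fills up.
	var grown uint64Slice
	allocs := 0
	for i := range data {
		if len(grown) == cap(grown) {
			allocs++ // append will allocate and copy here
		}
		grown = append(grown, uint64(i))
	}
	fmt.Printf("nil slice: %d allocations, final cap %d\n", allocs, cap(grown))

	// After: len(data) is known up front, so a single allocation
	// provides all the capacity append will ever need.
	sized := make(uint64Slice, 0, len(data))
	for i := range data {
		sized = append(sized, uint64(i))
	}
	fmt.Printf("pre-sized: 1 allocation, final cap %d\n", cap(sized))
}

If hard numbers are wanted, testing.AllocsPerRun from the standard library's
testing package can measure the per-call allocation count of each variant.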