Catchup to python-herald schema. Plus lots of refactoring. #49
3 changed files with 23 additions and 15 deletions
@@ -52,7 +52,7 @@ func (kv *BlockTxsValue) Pack(buf []byte, order binary.ByteOrder) ([]byte, error
 func (kv *BlockTxsValue) Unpack(buf []byte, order binary.ByteOrder) ([]byte, error) {
 	offset := 0
 	kv.TxHashes = make([]*chainhash.Hash, len(buf)/32)
-	for i, _ := range kv.TxHashes {
+	for i := range kv.TxHashes {
 		kv.TxHashes[i] = (*chainhash.Hash)(buf[offset:32])
 		offset += 32
 	}
@@ -3260,11 +3260,11 @@ func (kv *TrendingNotificationKey) PackKey() []byte {
 func (kv *TrendingNotificationKey) UnpackKey(buf []byte) {
 	// b'>L20s'
 	offset := 0
-	kv.Prefix = buf[offset:1]
+	kv.Prefix = buf[offset : offset+1]
 	offset += 1
 	kv.Height = binary.BigEndian.Uint32(buf[offset:])
 	offset += 4
-	kv.ClaimHash = buf[offset:20]
+	kv.ClaimHash = buf[offset : offset+20]
 	offset += 20
 }
 
@@ -3318,9 +3318,9 @@ func (kv *MempoolTxKey) Pack(fields int) []byte {
 func (kv *MempoolTxKey) UnpackKey(buf []byte) {
 	// b'>32s'
 	offset := 0
-	kv.Prefix = buf[offset:1]
+	kv.Prefix = buf[offset : offset+1]
 	offset += 1
-	kv.TxHash = buf[offset:32]
+	kv.TxHash = buf[offset : offset+32]
 	offset += 32
 }
 
@@ -3374,7 +3374,7 @@ func (kv *TouchedHashXKey) PackKey() []byte {
 func (kv *TouchedHashXKey) UnpackKey(buf []byte) {
 	// b'>L'
 	offset := 0
-	kv.Prefix = buf[offset:1]
+	kv.Prefix = buf[offset : offset+1]
 	offset += 1
 	kv.Height = binary.BigEndian.Uint32(buf[offset:])
 	offset += 4
@@ -3395,7 +3395,7 @@ func (kv *TouchedHashXValue) UnpackValue(buf []byte) {
 	// variable length bytes
 	n := len(buf)
 	for i, offset := 0, 0; offset+11 <= n; i, offset = i+1, offset+11 {
-		kv.TouchedHashXs[i] = buf[offset:11]
+		kv.TouchedHashXs[i] = buf[offset : offset+11]
 	}
 }
 
@@ -3432,9 +3432,9 @@ func (kv *HashXStatusKey) PackKey() []byte {
 func (kv *HashXStatusKey) UnpackKey(buf []byte) {
 	// b'>20s'
 	offset := 0
-	kv.Prefix = buf[offset:1]
+	kv.Prefix = buf[offset : offset+1]
 	offset += 1
-	kv.HashX = buf[offset:20]
+	kv.HashX = buf[offset : offset+20]
 	offset += 20
 }
 
@@ -3450,7 +3450,7 @@ func (kv *HashXStatusValue) PackValue() []byte {
 func (kv *HashXStatusValue) UnpackValue(buf []byte) {
 	// b'32s'
 	offset := 0
-	kv.Status = buf[offset:32]
+	kv.Status = buf[offset : offset+32]
 	offset += 32
 }
 
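Note on the repeated slice fix above: in Go, the second index of a slice expression is an absolute end position, not a length, so buf[offset:20] only takes the intended 20 bytes when offset is 0. A minimal standalone sketch (the buffer layout and byte values here are made up for illustration, not taken from the repo):

package main

import "fmt"

func main() {
	// A made-up 25-byte key: 1 prefix byte, 4-byte big-endian height, 20-byte hash.
	buf := make([]byte, 25)
	for i := range buf {
		buf[i] = byte(i)
	}

	offset := 5 // position right after the prefix and height

	// Old form: the second index is an absolute end position,
	// so this yields bytes 5..19 -- only 15 bytes, not 20.
	wrong := buf[offset:20]

	// New form: take exactly 20 bytes starting at offset.
	right := buf[offset : offset+20]

	fmt.Println(len(wrong), len(right)) // 15 20
}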
@@ -106,12 +106,20 @@ func testGenericOptions(options *dbpkg.IterOptions, filePath string, prefix byte
 			log.Println(err)
 		}
 
+		if numPartials != kv.Key.NumFields() {
+			t.Errorf("key reports %v fields but %v expected", kv.Key.NumFields(), numPartials)
+		}
 		for j := 1; j <= numPartials; j++ {
 			keyPartial, _ := options.Serializer.PackPartialKey(kv.Key, j)
 			// Check pack partial for sanity
-			if !bytes.HasPrefix(gotKey, keyPartial) {
-				// || (!bytes.HasSuffix(gotKey, []byte{0}) && bytes.Equal(gotKey, keyPartial))
-				t.Errorf("%+v should be prefix of %+v\n", keyPartial, gotKey)
+			if j < numPartials {
+				if !bytes.HasPrefix(gotKey, keyPartial) || (len(keyPartial) >= len(gotKey)) {
+					t.Errorf("%+v should be prefix of %+v\n", keyPartial, gotKey)
+				}
+			} else {
+				if !bytes.Equal(gotKey, keyPartial) {
+					t.Errorf("%+v should be equal to %+v\n", keyPartial, gotKey)
+				}
 			}
 		}
 	}
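The new branch in the hunk above encodes the property being enforced: packing the first j of numPartials fields must yield a strict prefix of the full key, and packing all fields must reproduce the key exactly. A rough standalone sketch of that property with a hypothetical three-field key (none of these names or byte values come from the repo):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// packPartial is a stand-in for a real PackPartialKey: it serializes the
// first `fields` of a hypothetical prefix/height/hash key.
func packPartial(prefix byte, height uint32, hash []byte, fields int) []byte {
	key := []byte{prefix}
	if fields >= 2 {
		h := make([]byte, 4)
		binary.BigEndian.PutUint32(h, height)
		key = append(key, h...)
	}
	if fields >= 3 {
		key = append(key, hash...)
	}
	return key
}

func main() {
	hash := bytes.Repeat([]byte{0xab}, 20)
	full := packPartial(0x54, 1000, hash, 3)

	for j := 1; j <= 3; j++ {
		partial := packPartial(0x54, 1000, hash, j)
		if j < 3 {
			// Partial packs must be strict prefixes of the full key.
			fmt.Println(bytes.HasPrefix(full, partial) && len(partial) < len(full)) // true
		} else {
			// Packing every field must reproduce the full key byte for byte.
			fmt.Println(bytes.Equal(full, partial)) // true
		}
	}
}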
@@ -243,7 +251,7 @@ func TestTXOToClaim(t *testing.T) {
 
 func TestClaimShortID(t *testing.T) {
 	filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.ClaimShortIdPrefix)
-	testGeneric(filePath, prefixes.ClaimShortIdPrefix, 3)(t)
+	testGeneric(filePath, prefixes.ClaimShortIdPrefix, 4)(t)
 }
 
 func TestClaimToChannel(t *testing.T) {
@@ -313,7 +321,7 @@ func TestClaimDiff(t *testing.T) {
 
 func TestUTXO(t *testing.T) {
 	filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.UTXO)
-	testGeneric(filePath, prefixes.UTXO, 1)(t)
+	testGeneric(filePath, prefixes.UTXO, 3)(t)
 }
 
 func TestHashXUTXO(t *testing.T) {