ClaimTakeover

This commit is contained in:
Jeffrey Picard 2022-01-13 18:24:09 -05:00
parent 6d4615ce31
commit dbbf1205e7
4 changed files with 171 additions and 5 deletions

View file

@ -555,7 +555,7 @@ type ClaimTakeoverKey struct {
type ClaimTakeoverValue struct {
ClaimHash []byte `json:"claim_hash"`
Height int32 `json:"height"`
Height uint32 `json:"height"`
}
func (v *ClaimTakeoverValue) String() string {
@ -567,6 +567,84 @@ func (v *ClaimTakeoverValue) String() string {
)
}
// PackKey serializes the key as:
// 1-byte table prefix | big-endian uint16 name length | normalized name bytes.
func (k *ClaimTakeoverKey) PackKey() []byte {
	const prefixLen = 1
	name := []byte(k.NormalizedName)
	key := make([]byte, prefixLen+2+len(name))
	copy(key, k.Prefix)
	binary.BigEndian.PutUint16(key[prefixLen:], uint16(len(name)))
	copy(key[prefixLen+2:], name)
	return key
}
// PackValue serializes the value in the Python struct format b'>20sL':
// a 20-byte claim hash followed by a big-endian uint32 height.
func (v *ClaimTakeoverValue) PackValue() []byte {
	buf := make([]byte, 24)
	copy(buf, v.ClaimHash[:20])
	binary.BigEndian.PutUint32(buf[20:], v.Height)
	return buf
}
// ClaimTakeoverKeyPackPartialNFields returns a packer closure that serializes
// only the first nFields fields of a ClaimTakeoverKey.
func ClaimTakeoverKeyPackPartialNFields(nFields int) func(*ClaimTakeoverKey) []byte {
	return func(key *ClaimTakeoverKey) []byte {
		return ClaimTakeoverKeyPackPartial(key, nFields)
	}
}
// ClaimTakeoverKeyPackPartial packs the first nFields fields of the key for
// use as an iterator prefix. nFields is clamped to [0, 1]: the 1-byte table
// prefix is always emitted, and field 1 (the length-prefixed normalized name)
// is appended only when requested.
func ClaimTakeoverKeyPackPartial(k *ClaimTakeoverKey, nFields int) []byte {
	// Clamp: we always need at least the prefix, and there is only one
	// optional field after it.
	if nFields > 1 {
		nFields = 1
	}
	if nFields < 0 {
		nFields = 0
	}

	const prefixLen = 1
	nameLen := len(k.NormalizedName)

	// Size the buffer up front, then fill it. The original looped over a
	// switch to do this; with a single optional field a direct conditional
	// is equivalent and clearer.
	n := prefixLen
	if nFields >= 1 {
		n += 2 + nameLen
	}

	key := make([]byte, n)
	copy(key, k.Prefix)
	if nFields >= 1 {
		binary.BigEndian.PutUint16(key[prefixLen:], uint16(nameLen))
		copy(key[prefixLen+2:], k.NormalizedName)
	}
	return key
}
// ClaimTakeoverKeyUnpack deserializes a packed key produced by PackKey:
// 1-byte prefix, big-endian uint16 name length, then the name bytes.
func ClaimTakeoverKeyUnpack(key []byte) *ClaimTakeoverKey {
	const prefixLen = 1
	nameLen := int(binary.BigEndian.Uint16(key[prefixLen:]))
	nameStart := prefixLen + 2
	return &ClaimTakeoverKey{
		Prefix:         key[:prefixLen],
		NormalizedName: string(key[nameStart : nameStart+nameLen]),
	}
}
// ClaimTakeoverValueUnpack deserializes a packed value produced by PackValue:
// a 20-byte claim hash followed by a big-endian uint32 height.
func ClaimTakeoverValueUnpack(value []byte) *ClaimTakeoverValue {
	v := &ClaimTakeoverValue{}
	v.ClaimHash = value[:20]
	v.Height = binary.BigEndian.Uint32(value[20:])
	return v
}
/*
class PendingActivationKey(typing.NamedTuple):
@ -1706,9 +1784,10 @@ func UnpackGenericKey(key []byte) (byte, interface{}, error) {
case EffectiveAmount:
return EffectiveAmount, EffectiveAmountKeyUnpack(key), nil
case ClaimExpiration:
return 0x0, nil, errors.Base("key unpack function for %v not implemented", firstByte)
case ClaimTakeover:
return 0x0, nil, errors.Base("key unpack function for %v not implemented", firstByte)
return ClaimTakeover, ClaimTakeoverKeyUnpack(key), nil
case PendingActivation:
return PendingActivation, PendingActivationKeyUnpack(key), nil
case ActivatedClaimAndSupport:
@ -1770,9 +1849,10 @@ func UnpackGenericValue(key, value []byte) (byte, interface{}, error) {
case EffectiveAmount:
return EffectiveAmount, EffectiveAmountValueUnpack(value), nil
case ClaimExpiration:
return 0x0, nil, errors.Base("value unpack not implemented for key %v", key)
case ClaimTakeover:
return 0x0, nil, errors.Base("value unpack not implemented for key %v", key)
return ClaimTakeover, ClaimTakeoverValueUnpack(value), nil
case PendingActivation:
return PendingActivation, PendingActivationValueUnpack(value), nil
case ActivatedClaimAndSupport:

View file

@ -44,6 +44,82 @@ func testInit(filePath string) (*grocksdb.DB, [][]string, func()) {
return db, records, toDefer
}
// TestClaimTakeover loads the claim_takeover CSV fixture into a temp rocksdb
// instance and verifies that every row round-trips through the iterator's
// Unpack followed by PackKey/PackValue, for both prefix iteration and
// explicit start/stop iteration.
func TestClaimTakeover(t *testing.T) {
	filePath := "../../resources/claim_takeover.csv"
	wOpts := grocksdb.NewDefaultWriteOptions()
	db, records, toDefer := testInit(filePath)
	defer toDefer()
	for _, record := range records {
		key, err := hex.DecodeString(record[0])
		if err != nil {
			// Fail fast: continuing with a zero-length key would only
			// produce confusing mismatch errors later.
			t.Fatalf("decoding key %q: %v", record[0], err)
		}
		val, err := hex.DecodeString(record[1])
		if err != nil {
			t.Fatalf("decoding value %q: %v", record[1], err)
		}
		if err := db.Put(wOpts, key, val); err != nil {
			t.Fatalf("writing fixture record: %v", err)
		}
	}

	// Iterate the full table prefix and compare re-packed keys/values
	// against the raw fixture rows, in order.
	options := dbpkg.NewIterateOptions().WithPrefix([]byte{prefixes.ClaimTakeover}).WithIncludeValue(true)
	ch := dbpkg.Iter(db, options)
	var i = 0
	for kv := range ch {
		gotKey := kv.Key.(*prefixes.ClaimTakeoverKey).PackKey()
		keyPartial1 := prefixes.ClaimTakeoverKeyPackPartial(kv.Key.(*prefixes.ClaimTakeoverKey), 1)
		// Sanity check: a 1-field partial pack must be a prefix of the full key.
		if !bytes.HasPrefix(gotKey, keyPartial1) {
			t.Errorf("%+v should be prefix of %+v\n", keyPartial1, gotKey)
		}
		got := kv.Value.(*prefixes.ClaimTakeoverValue).PackValue()
		wantKey, err := hex.DecodeString(records[i][0])
		if err != nil {
			t.Fatalf("decoding key %q: %v", records[i][0], err)
		}
		want, err := hex.DecodeString(records[i][1])
		if err != nil {
			t.Fatalf("decoding value %q: %v", records[i][1], err)
		}
		if !bytes.Equal(gotKey, wantKey) {
			// Fixed: this previously printed got/want (the values) instead
			// of the mismatching keys, making failures misleading.
			t.Errorf("gotKey: %+v, wantKey: %+v\n", gotKey, wantKey)
		}
		if !bytes.Equal(got, want) {
			t.Errorf("got: %+v, want: %+v\n", got, want)
		}
		i++
	}

	// Iterate again bounded by explicit start/stop keys and re-verify values.
	start, err := hex.DecodeString(records[0][0])
	if err != nil {
		t.Fatalf("decoding start key: %v", err)
	}
	stop, err := hex.DecodeString(records[9][0])
	if err != nil {
		t.Fatalf("decoding stop key: %v", err)
	}
	options2 := dbpkg.NewIterateOptions().WithStart(start).WithStop(stop).WithIncludeValue(true)
	ch2 := dbpkg.Iter(db, options2)
	i = 0
	for kv := range ch2 {
		got := kv.Value.(*prefixes.ClaimTakeoverValue).PackValue()
		want, err := hex.DecodeString(records[i][1])
		if err != nil {
			t.Fatalf("decoding value %q: %v", records[i][1], err)
		}
		if !bytes.Equal(got, want) {
			t.Errorf("got: %+v, want: %+v\n", got, want)
		}
		i++
	}
}
func TestPendingActivation(t *testing.T) {
filePath := "../../resources/pending_activation.csv"

View file

@ -38,7 +38,7 @@ func main() {
options := &db.IterOptions{
FillCache: false,
Prefix: []byte{prefixes.PendingActivation},
Prefix: []byte{prefixes.ClaimTakeover},
Start: nil,
Stop: nil,
IncludeStart: true,
@ -49,7 +49,7 @@ func main() {
RawValue: true,
}
db.ReadWriteRawN(dbVal, options, "./resources/pending_activation.csv", 10)
db.ReadWriteRawN(dbVal, options, "./resources/claim_takeover.csv", 10)
return
}

View file

@ -0,0 +1,10 @@
50000100,a51d5c567412654e6d741114fea6fb851dec73800004831f
50000101,11158037afca9c2efabc3dff55e352bf1f5634c50004831f
50000102,a4a575934de77d8ec8589595d8cd91857e3cf5ba0004831f
50000103,f595a21fb597bd030defefda3df9f8f4a3e0cb860004831f
50000104,682ccb0518a6bd00c955949d9ef330d3ac18cb800004831f
50000105,078a435851bf97c5cc36e8b03e3208a30d27679f0004831f
50000106,1a4a75246a766cf21a629f619bc5bcb531de7a5a0004831f
50000107,b144ad496b29b9c12c316f319773adcdd4c9bce20004831f
50000108,af2a09232fc6bf664088d65da42fe0345b4589600004831f
50000109,7b2ab15758c519116fb9ba9331a3b9ee7530831f0004831f
1 50000100 a51d5c567412654e6d741114fea6fb851dec73800004831f
2 50000101 11158037afca9c2efabc3dff55e352bf1f5634c50004831f
3 50000102 a4a575934de77d8ec8589595d8cd91857e3cf5ba0004831f
4 50000103 f595a21fb597bd030defefda3df9f8f4a3e0cb860004831f
5 50000104 682ccb0518a6bd00c955949d9ef330d3ac18cb800004831f
6 50000105 078a435851bf97c5cc36e8b03e3208a30d27679f0004831f
7 50000106 1a4a75246a766cf21a629f619bc5bcb531de7a5a0004831f
8 50000107 b144ad496b29b9c12c316f319773adcdd4c9bce20004831f
9 50000108 af2a09232fc6bf664088d65da42fe0345b4589600004831f
10 50000109 7b2ab15758c519116fb9ba9331a3b9ee7530831f0004831f