ClaimExpiration
This commit is contained in:
parent dbbf1205e7
commit 207ebd5608
4 changed files with 191 additions and 7 deletions

@@ -525,9 +525,9 @@ class ClaimExpirationValue(typing.NamedTuple):
 type ClaimExpirationKey struct {
 	Prefix []byte `json:"prefix"`
-	Expiration int32 `json:"expiration"`
-	TxNum int32 `json:"tx_num"`
-	Position int32 `json:"position"`
+	Expiration uint32 `json:"expiration"`
+	TxNum uint32 `json:"tx_num"`
+	Position uint16 `json:"position"`
 }

 type ClaimExpirationValue struct {

@@ -535,6 +535,96 @@ type ClaimExpirationValue struct {
 	NormalizedName string `json:"normalized_name"`
 }

+func (k *ClaimExpirationKey) PackKey() []byte {
+	prefixLen := 1
+	// b'>LLH'
+	n := prefixLen + 4 + 4 + 2
+	key := make([]byte, n)
+	copy(key, k.Prefix)
+	binary.BigEndian.PutUint32(key[prefixLen:], k.Expiration)
+	binary.BigEndian.PutUint32(key[prefixLen+4:], k.TxNum)
+	binary.BigEndian.PutUint16(key[prefixLen+8:], k.Position)
+
+	return key
+}
+
+func (v *ClaimExpirationValue) PackValue() []byte {
+	nameLen := len(v.NormalizedName)
+	n := 20 + 2 + nameLen
+	value := make([]byte, n)
+	copy(value, v.ClaimHash)
+	binary.BigEndian.PutUint16(value[20:], uint16(nameLen))
+	copy(value[22:], []byte(v.NormalizedName))
+
+	return value
+}
+
+func ClaimExpirationKeyPackPartialNFields(nFields int) func(*ClaimExpirationKey) []byte {
+	return func(u *ClaimExpirationKey) []byte {
+		return ClaimExpirationKeyPackPartial(u, nFields)
+	}
+}
+
+func ClaimExpirationKeyPackPartial(k *ClaimExpirationKey, nFields int) []byte {
+	// Limit nFields between 0 and number of fields, we always at least need
+	// the prefix, and we never need to iterate past the number of fields.
+	if nFields > 3 {
+		nFields = 3
+	}
+	if nFields < 0 {
+		nFields = 0
+	}
+
+	// b'>LLH'
+	prefixLen := 1
+	var n = prefixLen
+	for i := 0; i <= nFields; i++ {
+		switch i {
+		case 1:
+			n += 4
+		case 2:
+			n += 4
+		case 3:
+			n += 2
+		}
+	}
+
+	key := make([]byte, n)
+
+	for i := 0; i <= nFields; i++ {
+		switch i {
+		case 0:
+			copy(key, k.Prefix)
+		case 1:
+			binary.BigEndian.PutUint32(key[prefixLen:], k.Expiration)
+		case 2:
+			binary.BigEndian.PutUint32(key[prefixLen+4:], k.TxNum)
+		case 3:
+			binary.BigEndian.PutUint16(key[prefixLen+8:], k.Position)
+		}
+	}
+
+	return key
+}
+
+func ClaimExpirationKeyUnpack(key []byte) *ClaimExpirationKey {
+	prefixLen := 1
+	return &ClaimExpirationKey{
+		Prefix: key[:prefixLen],
+		Expiration: binary.BigEndian.Uint32(key[prefixLen:]),
+		TxNum: binary.BigEndian.Uint32(key[prefixLen+4:]),
+		Position: binary.BigEndian.Uint16(key[prefixLen+8:]),
+	}
+}
+
+func ClaimExpirationValueUnpack(value []byte) *ClaimExpirationValue {
+	nameLen := binary.BigEndian.Uint16(value[20:])
+	return &ClaimExpirationValue{
+		ClaimHash: value[:20],
+		NormalizedName: string(value[22 : 22+nameLen]),
+	}
+}
+
 /*
 class ClaimTakeoverKey(typing.NamedTuple):
 	normalized_name: str
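
A rough sketch of how these helpers compose (not part of the commit; values are arbitrary, and the prefixes import is the same one used by the test file further down). PackKey emits the 1-byte table prefix followed by the big-endian fields, and the uint32/uint32/uint16 field widths now match the serialized b'>LLH' layout. A partial pack of only the first field is a strict byte prefix of the full key, which is what makes "every claim expiring at height H" a cheap prefix scan:

    k := &prefixes.ClaimExpirationKey{
        Prefix:     []byte{prefixes.ClaimExpiration}, // 0x4f, judging by the CSV fixtures below
        Expiration: 800000,                           // expiration height (illustrative)
        TxNum:      123456,
        Position:   0,
    }
    full := k.PackKey()                                      // 1 + 4 + 4 + 2 = 11 bytes
    byHeight := prefixes.ClaimExpirationKeyPackPartial(k, 1) // 1 + 4 = 5 bytes: prefix + expiration only
    fmt.Println(bytes.HasPrefix(full, byHeight))             // true, so it can seed an iterator prefix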

@@ -1784,7 +1874,7 @@ func UnpackGenericKey(key []byte) (byte, interface{}, error) {
 	case EffectiveAmount:
 		return EffectiveAmount, EffectiveAmountKeyUnpack(key), nil
 	case ClaimExpiration:
-		return 0x0, nil, errors.Base("key unpack function for %v not implemented", firstByte)
+		return ClaimExpiration, ClaimExpirationKeyUnpack(key), nil
 	case ClaimTakeover:
 		return ClaimTakeover, ClaimTakeoverKeyUnpack(key), nil

@@ -1849,7 +1939,7 @@ func UnpackGenericValue(key, value []byte) (byte, interface{}, error) {
 	case EffectiveAmount:
 		return EffectiveAmount, EffectiveAmountValueUnpack(value), nil
 	case ClaimExpiration:
-		return 0x0, nil, errors.Base("value unpack not implemented for key %v", key)
+		return ClaimExpiration, ClaimExpirationValueUnpack(value), nil
 	case ClaimTakeover:
 		return ClaimTakeover, ClaimTakeoverValueUnpack(value), nil
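
With both switch arms filled in, raw rows read from RocksDB can be promoted to typed structs through the generic dispatchers. A minimal sketch (not part of the commit; rawKey and rawValue stand for bytes returned by an iterator):

    firstByte, ik, err := prefixes.UnpackGenericKey(rawKey)
    if err != nil {
        log.Fatalln(err)
    }
    _, iv, err := prefixes.UnpackGenericValue(rawKey, rawValue)
    if err != nil {
        log.Fatalln(err)
    }
    if firstByte == prefixes.ClaimExpiration {
        k := ik.(*prefixes.ClaimExpirationKey)
        v := iv.(*prefixes.ClaimExpirationValue)
        log.Printf("claim %x (%s) expires at height %d (tx %d, position %d)",
            v.ClaimHash, v.NormalizedName, k.Expiration, k.TxNum, k.Position)
    }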

@@ -44,6 +44,90 @@ func testInit(filePath string) (*grocksdb.DB, [][]string, func()) {
 	return db, records, toDefer
 }

+func TestClaimExpiration(t *testing.T) {
+
+	filePath := "../../resources/claim_expiration.csv"
+
+	wOpts := grocksdb.NewDefaultWriteOptions()
+	db, records, toDefer := testInit(filePath)
+	defer toDefer()
+	for _, record := range records {
+		key, err := hex.DecodeString(record[0])
+		if err != nil {
+			log.Println(err)
+		}
+		val, err := hex.DecodeString(record[1])
+		if err != nil {
+			log.Println(err)
+		}
+		db.Put(wOpts, key, val)
+	}
+	// test prefix
+	options := dbpkg.NewIterateOptions().WithPrefix([]byte{prefixes.ClaimExpiration}).WithIncludeValue(true)
+	ch := dbpkg.Iter(db, options)
+	var i = 0
+	for kv := range ch {
+		// log.Println(kv.Key)
+		gotKey := kv.Key.(*prefixes.ClaimExpirationKey).PackKey()
+
+		keyPartial1 := prefixes.ClaimExpirationKeyPackPartial(kv.Key.(*prefixes.ClaimExpirationKey), 1)
+		keyPartial2 := prefixes.ClaimExpirationKeyPackPartial(kv.Key.(*prefixes.ClaimExpirationKey), 2)
+		keyPartial3 := prefixes.ClaimExpirationKeyPackPartial(kv.Key.(*prefixes.ClaimExpirationKey), 3)
+
+		// Check pack partial for sanity
+		if !bytes.HasPrefix(gotKey, keyPartial1) {
+			t.Errorf("%+v should be prefix of %+v\n", keyPartial1, gotKey)
+		}
+		if !bytes.HasPrefix(gotKey, keyPartial2) {
+			t.Errorf("%+v should be prefix of %+v\n", keyPartial2, gotKey)
+		}
+		if !bytes.HasPrefix(gotKey, keyPartial3) {
+			t.Errorf("%+v should be prefix of %+v\n", keyPartial3, gotKey)
+		}
+
+		got := kv.Value.(*prefixes.ClaimExpirationValue).PackValue()
+		wantKey, err := hex.DecodeString(records[i][0])
+		if err != nil {
+			log.Println(err)
+		}
+		want, err := hex.DecodeString(records[i][1])
+		if err != nil {
+			log.Println(err)
+		}
+		if !bytes.Equal(gotKey, wantKey) {
+			t.Errorf("gotKey: %+v, wantKey: %+v\n", gotKey, wantKey)
+		}
+		if !bytes.Equal(got, want) {
+			t.Errorf("got: %+v, want: %+v\n", got, want)
+		}
+		i++
+	}
+
+	// Test start / stop
+	start, err := hex.DecodeString(records[0][0])
+	if err != nil {
+		log.Println(err)
+	}
+	stop, err := hex.DecodeString(records[9][0])
+	if err != nil {
+		log.Println(err)
+	}
+	options2 := dbpkg.NewIterateOptions().WithStart(start).WithStop(stop).WithIncludeValue(true)
+	ch2 := dbpkg.Iter(db, options2)
+	i = 0
+	for kv := range ch2 {
+		got := kv.Value.(*prefixes.ClaimExpirationValue).PackValue()
+		want, err := hex.DecodeString(records[i][1])
+		if err != nil {
+			log.Println(err)
+		}
+		if !bytes.Equal(got, want) {
+			t.Errorf("got: %+v, want: %+v\n", got, want)
+		}
+		i++
+	}
+}
+
 func TestClaimTakeover(t *testing.T) {

 	filePath := "../../resources/claim_takeover.csv"

4 main.go

@@ -38,7 +38,7 @@ func main() {
 	options := &db.IterOptions{
 		FillCache: false,
-		Prefix: []byte{prefixes.ClaimTakeover},
+		Prefix: []byte{prefixes.ClaimExpiration},
 		Start: nil,
 		Stop: nil,
 		IncludeStart: true,

@@ -49,7 +49,7 @@ func main() {
 		RawValue: true,
 	}

-	db.ReadWriteRawN(dbVal, options, "./resources/claim_takeover.csv", 10)
+	db.ReadWriteRawN(dbVal, options, "./resources/claim_expiration.csv", 10)

 	return
 }

10 resources/claim_expiration.csv (new file)
@ -0,0 +1,10 @@
|
|||
4f00222fbd00162aa70000,c78ac4c326cd43cdc0c844b7cea13659449ab3e40015746573742d70686f746f2d7374726173626f757267
|
||||
4f002230a700162f600000,ebf95f7fdb89db5467bb1b88ea3b0f0f7ee5ce360003636e63
|
||||
4f002230e3001630960000,a6f91a86837ab84a4cf0d2dcbe94704a528cf820000f776f6e646572776f6d616e31393933
|
||||
4f002231e8001635e60000,9673cc2a1aac64d7b2742705abfb09fca30d7e0500056d6d61736b
|
||||
4f00223246001638a80000,c39342066646dc50f1a9954b41684d157b035dac00036f6e65
|
||||
4f00223494001645ef0001,4689c1ccb4420309f93ab98799b28c49fa4d3809000a65617379737472656574
|
||||
4f002236df0016529a0000,f1628d66ae52295590b72b9a0b3a3527642a532600137465737470756230332d32312d323031372d32
|
||||
4f002236e10016529d0000,a4c61ced261ab571bdb3410ae140bec6c31f14ce00117465737470756230332d32312d32303137
|
||||
4f00223774001655960000,2327bcb6d7578a2669e416b5aa185fe14ee8e03e00056569676874
|
||||
4f00223a04001664200000,f69099600bdca9b062ba60432dba3c0ca2241167002c686973746f72792d6f662d6672696564726963682d69692d6f662d707275737369612d766f6c756d652d3137
|
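
Each row is a hex-encoded key,value pair as dumped by ReadWriteRawN above. A quick sketch for eyeballing a fixture row (not part of the commit; same assumed imports as the test file, error handling elided; the values in the trailing comment are what the first row appears to decode to):

    rawKey, _ := hex.DecodeString("4f00222fbd00162aa70000")
    rawVal, _ := hex.DecodeString("c78ac4c326cd43cdc0c844b7cea13659449ab3e40015746573742d70686f746f2d7374726173626f757267")

    k := prefixes.ClaimExpirationKeyUnpack(rawKey)   // 0x4f prefix | uint32 expiration | uint32 tx_num | uint16 position
    v := prefixes.ClaimExpirationValueUnpack(rawVal) // 20-byte claim hash | uint16 name length | normalized name

    fmt.Printf("%d %d %d %x %s\n", k.Expiration, k.TxNum, k.Position, v.ClaimHash, v.NormalizedName)
    // prints something like: 2240445 1452711 0 c78ac4c3… test-photo-strasbourg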