ChannelToClaim

parent 204c0f78d9
commit f48337e4b9

4 changed files with 199 additions and 4 deletions
@@ -407,14 +407,107 @@ type ChannelToClaimKey struct {
 	Prefix      []byte `json:"prefix"`
 	SigningHash []byte `json:"signing_hash"`
 	Name        string `json:"name"`
-	TxNum       int32  `json:"tx_num"`
-	Position    int32  `json:"position"`
+	TxNum       uint32 `json:"tx_num"`
+	Position    uint16 `json:"position"`
 }
 
 type ChannelToClaimValue struct {
 	ClaimHash []byte `json:"claim_hash"`
 }
 
+func (k *ChannelToClaimKey) PackKey() []byte {
+	prefixLen := 1
+	nameLen := len(k.Name)
+	n := prefixLen + 20 + 2 + nameLen + 4 + 2
+	key := make([]byte, n)
+	copy(key, k.Prefix)
+	copy(key[prefixLen:], k.SigningHash[:20])
+	binary.BigEndian.PutUint16(key[prefixLen+20:], uint16(nameLen))
+	copy(key[prefixLen+22:], []byte(k.Name))
+	binary.BigEndian.PutUint32(key[prefixLen+22+nameLen:], k.TxNum)
+	binary.BigEndian.PutUint16(key[prefixLen+22+nameLen+4:], k.Position)
+
+	return key
+}
+
+func (v *ChannelToClaimValue) PackValue() []byte {
+	value := make([]byte, 20)
+	copy(value, v.ClaimHash[:20])
+
+	return value
+}
+
+func ChannelToClaimKeyPackPartialNFields(nFields int) func(*ChannelToClaimKey) []byte {
+	return func(u *ChannelToClaimKey) []byte {
+		return ChannelToClaimKeyPackPartial(u, nFields)
+	}
+}
+
+func ChannelToClaimKeyPackPartial(k *ChannelToClaimKey, nFields int) []byte {
+	// Limit nFields between 0 and the number of fields: we always need at
+	// least the prefix, and we never need to iterate past the last field.
+	if nFields > 4 {
+		nFields = 4
+	}
+	if nFields < 0 {
+		nFields = 0
+	}
+
+	nameLen := len(k.Name)
+	prefixLen := 1
+	var n = prefixLen
+	for i := 0; i <= nFields; i++ {
+		switch i {
+		case 1:
+			n += 20
+		case 2:
+			n += 2 + nameLen
+		case 3:
+			n += 4
+		case 4:
+			n += 2
+		}
+	}
+
+	key := make([]byte, n)
+
+	for i := 0; i <= nFields; i++ {
+		switch i {
+		case 0:
+			copy(key, k.Prefix)
+		case 1:
+			copy(key[prefixLen:], k.SigningHash[:20])
+		case 2:
+			binary.BigEndian.PutUint16(key[prefixLen+20:], uint16(nameLen))
+			copy(key[prefixLen+22:], []byte(k.Name))
+		case 3:
+			binary.BigEndian.PutUint32(key[prefixLen+22+nameLen:], k.TxNum)
+		case 4:
+			binary.BigEndian.PutUint16(key[prefixLen+22+nameLen+4:], k.Position)
+		}
+	}
+
+	return key
+}
+
+func ChannelToClaimKeyUnpack(key []byte) *ChannelToClaimKey {
+	prefixLen := 1
+	nameLen := int(binary.BigEndian.Uint16(key[prefixLen+20:]))
+	return &ChannelToClaimKey{
+		Prefix:      key[:prefixLen],
+		SigningHash: key[prefixLen : prefixLen+20],
+		Name:        string(key[prefixLen+22 : prefixLen+22+nameLen]),
+		TxNum:       binary.BigEndian.Uint32(key[prefixLen+22+nameLen:]),
+		Position:    binary.BigEndian.Uint16(key[prefixLen+22+nameLen+4:]),
+	}
+}
+
+func ChannelToClaimValueUnpack(value []byte) *ChannelToClaimValue {
+	return &ChannelToClaimValue{
+		ClaimHash: value[:20],
+	}
+}
+
 /*
 
 class ChannelCountKey(typing.NamedTuple):
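The packed key layout, as PackKey lays it out, is: a 1-byte table prefix (0x4a, ASCII 'J', as the fixture rows below show), the 20-byte channel signing hash, a big-endian uint16 name length, the name bytes, a big-endian uint32 tx number, and a big-endian uint16 position; the value is just a 20-byte claim hash. A minimal round-trip sketch under two assumptions: the module path github.com/lbryio/hub is a guess, and the name/tx_num values are borrowed from the first fixture row below.

package main

import (
	"bytes"
	"fmt"

	"github.com/lbryio/hub/db/prefixes" // assumed module path
)

func main() {
	k := &prefixes.ChannelToClaimKey{
		Prefix:      []byte{prefixes.ChannelToClaim},
		SigningHash: bytes.Repeat([]byte{0xab}, 20), // placeholder 20-byte channel hash
		Name:        "ghfgh",                        // name from the first fixture row
		TxNum:       2105215,                        // 0x00201f7f
		Position:    0,
	}
	packed := k.PackKey() // 1 + 20 + 2 + 5 + 4 + 2 = 34 bytes
	// Unpacking what was just packed should reproduce every field.
	u := prefixes.ChannelToClaimKeyUnpack(packed)
	fmt.Println(u.Name == k.Name, u.TxNum == k.TxNum, u.Position == k.Position)
}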
@@ -2031,7 +2124,9 @@ func UnpackGenericKey(key []byte) (byte, interface{}, error) {
 	case TXOToClaim:
 
 	case ClaimToChannel:
+		return 0x0, nil, errors.Base("key unpack function for %v not implemented", firstByte)
 	case ChannelToClaim:
+		return ChannelToClaim, ChannelToClaimKeyUnpack(key), nil
 
 	case ClaimShortIdPrefix:
 		return 0x0, nil, errors.Base("key unpack function for %v not implemented", firstByte)
@@ -2099,7 +2194,9 @@ func UnpackGenericValue(key, value []byte) (byte, interface{}, error) {
 	case TXOToClaim:
 
 	case ClaimToChannel:
+		return 0x0, nil, errors.Base("value unpack not implemented for key %v", key)
 	case ChannelToClaim:
+		return ChannelToClaim, ChannelToClaimValueUnpack(value), nil
 
 	case ClaimShortIdPrefix:
 		return 0x0, nil, errors.Base("value unpack not implemented for key %v", key)
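With both cases wired up, a raw row from the 'J' keyspace can be decoded without knowing its type up front. A sketch of the dispatch; the helper name is hypothetical and the import path is the same assumption as above:

import (
	"fmt"
	"log"

	"github.com/lbryio/hub/db/prefixes" // assumed module path
)

// describeRow is a hypothetical helper; rawKey and rawValue would come
// from a RocksDB iterator.
func describeRow(rawKey, rawValue []byte) {
	prefix, key, err := prefixes.UnpackGenericKey(rawKey)
	if err != nil {
		log.Println(err)
		return
	}
	if prefix == prefixes.ChannelToClaim {
		k := key.(*prefixes.ChannelToClaimKey)
		_, val, _ := prefixes.UnpackGenericValue(rawKey, rawValue)
		v := val.(*prefixes.ChannelToClaimValue)
		fmt.Printf("claim %q tx_num=%d position=%d -> %x\n", k.Name, k.TxNum, k.Position, v.ClaimHash)
	}
}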
@@ -44,6 +44,94 @@ func testInit(filePath string) (*grocksdb.DB, [][]string, func()) {
 	return db, records, toDefer
 }
 
+func TestChannelToClaim(t *testing.T) {
+
+	filePath := "../../resources/channel_to_claim.csv"
+
+	wOpts := grocksdb.NewDefaultWriteOptions()
+	db, records, toDefer := testInit(filePath)
+	defer toDefer()
+	for _, record := range records {
+		key, err := hex.DecodeString(record[0])
+		if err != nil {
+			log.Println(err)
+		}
+		val, err := hex.DecodeString(record[1])
+		if err != nil {
+			log.Println(err)
+		}
+		db.Put(wOpts, key, val)
+	}
+	// test prefix
+	options := dbpkg.NewIterateOptions().WithPrefix([]byte{prefixes.ChannelToClaim}).WithIncludeValue(true)
+	ch := dbpkg.Iter(db, options)
+	var i = 0
+	for kv := range ch {
+		gotKey := kv.Key.(*prefixes.ChannelToClaimKey).PackKey()
+
+		keyPartial1 := prefixes.ChannelToClaimKeyPackPartial(kv.Key.(*prefixes.ChannelToClaimKey), 1)
+		keyPartial2 := prefixes.ChannelToClaimKeyPackPartial(kv.Key.(*prefixes.ChannelToClaimKey), 2)
+		keyPartial3 := prefixes.ChannelToClaimKeyPackPartial(kv.Key.(*prefixes.ChannelToClaimKey), 3)
+		keyPartial4 := prefixes.ChannelToClaimKeyPackPartial(kv.Key.(*prefixes.ChannelToClaimKey), 4)
+
+		// Check pack partial for sanity
+		if !bytes.HasPrefix(gotKey, keyPartial1) {
+			t.Errorf("%+v should be prefix of %+v\n", keyPartial1, gotKey)
+		}
+		if !bytes.HasPrefix(gotKey, keyPartial2) {
+			t.Errorf("%+v should be prefix of %+v\n", keyPartial2, gotKey)
+		}
+		if !bytes.HasPrefix(gotKey, keyPartial3) {
+			t.Errorf("%+v should be prefix of %+v\n", keyPartial3, gotKey)
+		}
+		if !bytes.HasPrefix(gotKey, keyPartial4) {
+			t.Errorf("%+v should be prefix of %+v\n", keyPartial4, gotKey)
+		}
+
+		got := kv.Value.(*prefixes.ChannelToClaimValue).PackValue()
+		wantKey, err := hex.DecodeString(records[i][0])
+		if err != nil {
+			log.Println(err)
+		}
+		want, err := hex.DecodeString(records[i][1])
+		if err != nil {
+			log.Println(err)
+		}
+		if !bytes.Equal(gotKey, wantKey) {
+			t.Errorf("gotKey: %+v, wantKey: %+v\n", gotKey, wantKey)
+		}
+		if !bytes.Equal(got, want) {
+			t.Errorf("got: %+v, want: %+v\n", got, want)
+		}
+		i++
+	}
+
+	// Test start / stop
+	start, err := hex.DecodeString(records[0][0])
+	if err != nil {
+		log.Println(err)
+	}
+	stop, err := hex.DecodeString(records[9][0])
+	if err != nil {
+		log.Println(err)
+	}
+	options2 := dbpkg.NewIterateOptions().WithStart(start).WithStop(stop).WithIncludeValue(true)
+	ch2 := dbpkg.Iter(db, options2)
+	i = 0
+	for kv := range ch2 {
+		got := kv.Value.(*prefixes.ChannelToClaimValue).PackValue()
+		want, err := hex.DecodeString(records[i][1])
+		if err != nil {
+			log.Println(err)
+		}
+		if !bytes.Equal(got, want) {
+			t.Errorf("got: %+v, want: %+v\n", got, want)
+		}
+		i++
+	}
+}
+
 func TestClaimToSupport(t *testing.T) {
 
 	filePath := "../../resources/claim_to_support.csv"
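Beyond the prefix-sanity checks above, the partial packer is what an actual channel lookup would use: packing one field past the table prefix (just the signing hash) gives the shortest key covering every claim in one channel. A hypothetical scan, assuming Iter unpacks keys and values as it does in this test and that the module path guessed earlier holds:

import (
	"github.com/linxGnu/grocksdb"

	dbpkg "github.com/lbryio/hub/db"    // assumed module path
	"github.com/lbryio/hub/db/prefixes" // assumed module path
)

// claimsInChannel is a hypothetical helper; signingHash must be the
// channel's 20-byte hash.
func claimsInChannel(db *grocksdb.DB, signingHash []byte) [][]byte {
	partial := prefixes.ChannelToClaimKeyPackPartial(&prefixes.ChannelToClaimKey{
		Prefix:      []byte{prefixes.ChannelToClaim},
		SigningHash: signingHash,
	}, 1) // nFields=1: prefix byte + signing hash only
	options := dbpkg.NewIterateOptions().WithPrefix(partial).WithIncludeValue(true)
	var hashes [][]byte
	for kv := range dbpkg.Iter(db, options) {
		hashes = append(hashes, kv.Value.(*prefixes.ChannelToClaimValue).ClaimHash)
	}
	return hashes
}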

main.go (2 additions, 2 deletions)
@@ -38,7 +38,7 @@ func main() {
 
 	options := &db.IterOptions{
 		FillCache:    false,
-		Prefix:       []byte{prefixes.ClaimToSupport},
+		Prefix:       []byte{prefixes.ChannelToClaim},
 		Start:        nil,
 		Stop:         nil,
 		IncludeStart: true,
@@ -49,7 +49,7 @@ func main() {
 		RawValue:     true,
 	}
 
-	db.ReadWriteRawN(dbVal, options, "./resources/claim_to_support.csv", 10)
+	db.ReadWriteRawN(dbVal, options, "./resources/channel_to_claim.csv", 10)
 
 	return
 }

resources/channel_to_claim.csv (new file, 10 additions)
@@ -0,0 +1,10 @@
+4a00009f35397ada0476b04c67978ad081b50833ed0005676866676800201f7f0000,2febc9f39e70fac69ce7504dc7fb9523c9617c68
+4a0000cbef248847373c999de142bc2d7da4d014100013646f6d656e7a61696e2d657374616661646f720358fe5f0000,2b15c03bb512d84e7450b0fbdbc4db1f9b454137
+4a0000eff19123a0b3087a0f059a5e0c10d74377560010636176652d73746f72792d7974702d3102fb1edf0000,dc07a33c5e8db91e5f27ea6cfc6415677c834d34
+4a0000eff19123a0b3087a0f059a5e0c10d7437756001b636c6f646f72212d67616d652d6f662d7468726f6e65732d79747002fb1ed60000,c869ff2f82cc7d28d63cfe672de46042898d6a90
+4a0000eff19123a0b3087a0f059a5e0c10d74377560020636f6e636c6176652d6f662d7468652d63686f73656e2d28776f772d6266612902fb1ecf0000,2d3225c7800a8e8098aa63c9a2c5ddd6ac30f968
+4a0000eff19123a0b3087a0f059a5e0c10d7437756002077686f27732d746861742d706f6b656d6f6e2d697427732d636174657270696502fb1ed00000,627769e3f4f577261c3aec4addd05890747a5c8b
+4a0000eff19123a0b3087a0f059a5e0c10d7437756002077686f27732d746861742d706f6b656d6f6e2d697427732d70696b616368752102fb1ed40000,45d8a2b4f512f4eb8bf6409454975235343d4b40
+4a0000eff19123a0b3087a0f059a5e0c10d7437756002162726f636b2d74726965732d746f2d736176652d736861796d696e2d7573696e670341a2c40000,2c9d219c6ce4596cffb250950c3d94938f8df91b
+4a0000eff19123a0b3087a0f059a5e0c10d74377560021706d64322d6578706c6f726572732d6f662d736b792d706c61797468726f7567680315ca790000,be33e079423884c11969b3fd8f7586dee8e3a73f
+4a0000eff19123a0b3087a0f059a5e0c10d74377560023706d64322d6578706c6f726572732d6f662d736b792d706c61797468726f7567682d320315ca770000,0efd1fd5225df823ebe6bc2196d405db745f291c
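Each fixture row is hex(key),hex(value). Decoding the first row by hand confirms the layout: 4a is the 'J' prefix, the next 20 bytes are the signing hash, 0005 is the name length, 6768666768 is "ghfgh", 00201f7f is tx_num 2105215, and 0000 is the position; the value is the 20-byte claim hash. A self-contained check:

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

func main() {
	raw, _ := hex.DecodeString("4a00009f35397ada0476b04c67978ad081b50833ed0005676866676800201f7f0000")
	nameLen := int(binary.BigEndian.Uint16(raw[21:23]))
	fmt.Printf("prefix=%q hash=%x\n", raw[0], raw[1:21])                     // prefix='J'
	fmt.Printf("name=%q\n", raw[23:23+nameLen])                              // name="ghfgh"
	fmt.Printf("tx_num=%d\n", binary.BigEndian.Uint32(raw[23+nameLen:]))     // tx_num=2105215
	fmt.Printf("position=%d\n", binary.BigEndian.Uint16(raw[23+nameLen+4:])) // position=0
}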