HashXHistory

Jeffrey Picard 2022-01-15 01:14:30 -05:00
parent 9311f859c8
commit 46cbf2280d
4 changed files with 301 additions and 112 deletions


@@ -167,6 +167,115 @@ type HashXUTXOValue struct {
	HashX []byte `json:"hashx"`
}

//
// HashXUTXOKey / HashXUTXOValue
//

func (k *HashXUTXOKey) String() string {
	return fmt.Sprintf(
		"%s(short_tx_hash=%s, tx_num=%d, nout=%d)",
		reflect.TypeOf(k),
		hex.EncodeToString(k.ShortTXHash),
		k.TxNum,
		k.Nout,
	)
}

func (v *HashXUTXOValue) String() string {
	return fmt.Sprintf(
		"%s(hashX=%s)",
		reflect.TypeOf(v),
		hex.EncodeToString(v.HashX),
	)
}

func (k *HashXUTXOKey) PackKey() []byte {
	prefixLen := 1
	// b'>4sLH'
	n := prefixLen + 4 + 4 + 2
	key := make([]byte, n)
	copy(key, k.Prefix)
	copy(key[prefixLen:], k.ShortTXHash)
	binary.BigEndian.PutUint32(key[prefixLen+4:], k.TxNum)
	binary.BigEndian.PutUint16(key[prefixLen+8:], k.Nout)
	return key
}

func (v *HashXUTXOValue) PackValue() []byte {
	value := make([]byte, 11)
	copy(value, v.HashX)
	return value
}
// HashXUTXOKeyPackPartialNFields creates a pack partial key function for n fields.
func HashXUTXOKeyPackPartialNFields(nFields int) func(*HashXUTXOKey) []byte {
	return func(u *HashXUTXOKey) []byte {
		return HashXUTXOKeyPackPartial(u, nFields)
	}
}
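
// Hypothetical usage: iterator setup that always seeks by the same number of
// fields can hold a ready-made packer built by the factory above, e.g.
//
//	packShortHash := HashXUTXOKeyPackPartialNFields(1) // prefix + 4-byte short tx hash
//	seekKey := packShortHash(k)
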
// HashXUTXOKeyPackPartial packs a variable number of fields into a byte
// array
func HashXUTXOKeyPackPartial(k *HashXUTXOKey, nFields int) []byte {
	// Limit nFields between 0 and the number of fields; we always need at
	// least the prefix, and we never iterate past the number of fields.
	if nFields > 3 {
		nFields = 3
	}
	if nFields < 0 {
		nFields = 0
	}
	// b'>4sLH'
	prefixLen := 1
	var n = prefixLen
	for i := 0; i <= nFields; i++ {
		switch i {
		case 1:
			n += 4
		case 2:
			n += 4
		case 3:
			n += 2
		}
	}
	key := make([]byte, n)
	for i := 0; i <= nFields; i++ {
		switch i {
		case 0:
			copy(key, k.Prefix)
		case 1:
			copy(key[prefixLen:], k.ShortTXHash)
		case 2:
			binary.BigEndian.PutUint32(key[prefixLen+4:], k.TxNum)
		case 3:
			binary.BigEndian.PutUint16(key[prefixLen+8:], k.Nout)
		}
	}
	return key
}
func HashXUTXOKeyUnpack(key []byte) *HashXUTXOKey {
	prefixLen := 1
	return &HashXUTXOKey{
		Prefix:      key[:prefixLen],
		ShortTXHash: key[prefixLen : prefixLen+4],
		TxNum:       binary.BigEndian.Uint32(key[prefixLen+4:]),
		Nout:        binary.BigEndian.Uint16(key[prefixLen+8:]),
	}
}

func HashXUTXOValueUnpack(value []byte) *HashXUTXOValue {
	return &HashXUTXOValue{
		HashX: value[:11],
	}
}
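
// Hypothetical round-trip, mirroring the Python struct format b'>4sLH' noted
// above (one prefix byte, 4-byte short tx hash, big-endian uint32 tx_num,
// big-endian uint16 nout):
//
//	k := &HashXUTXOKey{
//		Prefix:      []byte{HashXUTXO},
//		ShortTXHash: []byte{0xde, 0xad, 0xbe, 0xef},
//		TxNum:       1234,
//		Nout:        1,
//	}
//	packed := k.PackKey()           // 11 bytes
//	u := HashXUTXOKeyUnpack(packed) // fields match k
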
/*
class HashXHistoryKey(NamedTuple):
    hashX: bytes
@@ -187,7 +296,104 @@ type HashXHistoryKey struct {
}

type HashXHistoryValue struct {
-	HashXes []uint32 `json:"hashxes"`
+	HashXes []uint16 `json:"hashxes"`
}
func (k *HashXHistoryKey) String() string {
	return fmt.Sprintf(
		"%s(hashx=%s, height=%d)",
		reflect.TypeOf(k),
		hex.EncodeToString(k.HashX),
		k.Height,
	)
}

func (k *HashXHistoryKey) PackKey() []byte {
	prefixLen := 1
	// b'>11sL'
	n := prefixLen + 11 + 4
	key := make([]byte, n)
	copy(key, k.Prefix)
	copy(key[prefixLen:], k.HashX)
	binary.BigEndian.PutUint32(key[prefixLen+11:], k.Height)
	return key
}

func (v *HashXHistoryValue) PackValue() []byte {
	n := len(v.HashXes)
	value := make([]byte, n*2)
	for i, x := range v.HashXes {
		binary.BigEndian.PutUint16(value[i*2:], x)
	}
	return value
}

// HashXHistoryKeyPackPartialNFields creates a pack partial key function for n fields.
func HashXHistoryKeyPackPartialNFields(nFields int) func(*HashXHistoryKey) []byte {
	return func(u *HashXHistoryKey) []byte {
		return HashXHistoryKeyPackPartial(u, nFields)
	}
}
// HashXHistoryKeyPackPartial packs a variable number of fields into a byte
// array
func HashXHistoryKeyPackPartial(k *HashXHistoryKey, nFields int) []byte {
	// Limit nFields between 0 and the number of fields; we always need at
	// least the prefix, and we never iterate past the number of fields.
	if nFields > 2 {
		nFields = 2
	}
	if nFields < 0 {
		nFields = 0
	}
	prefixLen := 1
	var n = prefixLen
	for i := 0; i <= nFields; i++ {
		switch i {
		case 1:
			n += 11
		case 2:
			n += 4
		}
	}
	key := make([]byte, n)
	for i := 0; i <= nFields; i++ {
		switch i {
		case 0:
			copy(key, k.Prefix)
		case 1:
			copy(key[prefixLen:], k.HashX[:11])
		case 2:
			binary.BigEndian.PutUint32(key[prefixLen+11:], k.Height)
		}
	}
	return key
}
func HashXHistoryKeyUnpack(key []byte) *HashXHistoryKey {
	prefixLen := 1
	return &HashXHistoryKey{
		Prefix: key[:prefixLen],
		HashX:  key[prefixLen : prefixLen+11],
		Height: binary.BigEndian.Uint32(key[prefixLen+11:]),
	}
}

func HashXHistoryValueUnpack(value []byte) *HashXHistoryValue {
	n := len(value) / 2
	hashxes := make([]uint16, n)
	for i := 0; i < n; i++ {
		hashxes[i] = binary.BigEndian.Uint16(value[i*2:])
	}
	return &HashXHistoryValue{
		HashXes: hashxes,
	}
}
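
// Hypothetical worked example using the first row of the hashx_history.csv
// fixture added in this commit (key = 1 prefix byte + 11-byte hashX +
// big-endian uint32 height; value = array of big-endian uint16):
//
//	key, _ := hex.DecodeString("7800000004c2acd7268f72f4000d9775")
//	val, _ := hex.DecodeString("1bc1bc01")
//	k := HashXHistoryKeyUnpack(key)   // prefix 0x78, hashX 00000004c2acd7268f72f4, Height 890741
//	v := HashXHistoryValueUnpack(val) // HashXes [7105 48129] (0x1bc1, 0xbc01)
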
/*
@@ -2490,115 +2696,6 @@ func TouchedOrDeletedClaimValueUnpack(value []byte) *TouchedOrDeletedClaimValue
	}
}
//
// HashXUTXOKey / HashXUTXOValue
//

func (k *HashXUTXOKey) String() string {
	return fmt.Sprintf(
		"%s(short_tx_hash=%s, tx_num=%d, nout=%d)",
		reflect.TypeOf(k),
		hex.EncodeToString(k.ShortTXHash),
		k.TxNum,
		k.Nout,
	)
}

func (v *HashXUTXOValue) String() string {
	return fmt.Sprintf(
		"%s(hashX=%s)",
		reflect.TypeOf(v),
		hex.EncodeToString(v.HashX),
	)
}

func (k *HashXUTXOKey) PackKey() []byte {
	prefixLen := 1
	// b'>4sLH'
	n := prefixLen + 4 + 4 + 2
	key := make([]byte, n)
	copy(key, k.Prefix)
	copy(key[prefixLen:], k.ShortTXHash)
	binary.BigEndian.PutUint32(key[prefixLen+4:], k.TxNum)
	binary.BigEndian.PutUint16(key[prefixLen+8:], k.Nout)
	return key
}

func (v *HashXUTXOValue) PackValue() []byte {
	value := make([]byte, 11)
	copy(value, v.HashX)
	return value
}

// HashXUTXOKeyPackPartialNFields creates a pack partial key function for n fields.
func HashXUTXOKeyPackPartialNFields(nFields int) func(*HashXUTXOKey) []byte {
	return func(u *HashXUTXOKey) []byte {
		return HashXUTXOKeyPackPartial(u, nFields)
	}
}

// HashXUTXOKeyPackPartial packs a variable number of fields into a byte
// array
func HashXUTXOKeyPackPartial(k *HashXUTXOKey, nFields int) []byte {
	// Limit nFields between 0 and the number of fields; we always need at
	// least the prefix, and we never iterate past the number of fields.
	if nFields > 3 {
		nFields = 3
	}
	if nFields < 0 {
		nFields = 0
	}
	// b'>4sLH'
	prefixLen := 1
	var n = prefixLen
	for i := 0; i <= nFields; i++ {
		switch i {
		case 1:
			n += 4
		case 2:
			n += 4
		case 3:
			n += 2
		}
	}
	key := make([]byte, n)
	for i := 0; i <= nFields; i++ {
		switch i {
		case 0:
			copy(key, k.Prefix)
		case 1:
			copy(key[prefixLen:], k.ShortTXHash)
		case 2:
			binary.BigEndian.PutUint32(key[prefixLen+4:], k.TxNum)
		case 3:
			binary.BigEndian.PutUint16(key[prefixLen+8:], k.Nout)
		}
	}
	return key
}

func HashXUTXOKeyUnpack(key []byte) *HashXUTXOKey {
	prefixLen := 1
	return &HashXUTXOKey{
		Prefix:      key[:prefixLen],
		ShortTXHash: key[prefixLen : prefixLen+4],
		TxNum:       binary.BigEndian.Uint32(key[prefixLen+4:]),
		Nout:        binary.BigEndian.Uint16(key[prefixLen+8:]),
	}
}

func HashXUTXOValueUnpack(value []byte) *HashXUTXOValue {
	return &HashXUTXOValue{
		HashX: value[:11],
	}
}
//
// UTXOKey / UTXOValue
//
@@ -2763,6 +2860,7 @@ func UnpackGenericKey(key []byte) (byte, interface{}, error) {
	case HashXUTXO:
		return HashXUTXO, HashXUTXOKeyUnpack(key), nil
	case HashXHistory:
		return HashXHistory, HashXHistoryKeyUnpack(key), nil
	case DBState:
	case ChannelCount:
	case SupportAmount:
@@ -2838,6 +2936,7 @@ func UnpackGenericValue(key, value []byte) (byte, interface{}, error) {
	case HashXUTXO:
		return HashXUTXO, HashXUTXOValueUnpack(value), nil
	case HashXHistory:
		return HashXHistory, HashXHistoryValueUnpack(value), nil
	case DBState:
	case ChannelCount:
	case SupportAmount:
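
		// Hypothetical usage of the generic unpackers on a raw key/value
		// pair read from RocksDB (a HashXHistory row from the fixture below):
		//
		//	rawKey, _ := hex.DecodeString("7800000004c2acd7268f72f4000d9775")
		//	rawVal, _ := hex.DecodeString("1bc1bc01")
		//	table, k, _ := UnpackGenericKey(rawKey)       // table == HashXHistory
		//	_, v, _ := UnpackGenericValue(rawKey, rawVal) // v is a *HashXHistoryValue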


@@ -44,6 +44,86 @@ func testInit(filePath string) (*grocksdb.DB, [][]string, func()) {
	return db, records, toDefer
}
func TestHashXHistory(t *testing.T) {
	filePath := "../../resources/hashx_history.csv"
	wOpts := grocksdb.NewDefaultWriteOptions()
	db, records, toDefer := testInit(filePath)
	defer toDefer()
	for _, record := range records {
		key, err := hex.DecodeString(record[0])
		if err != nil {
			log.Println(err)
		}
		val, err := hex.DecodeString(record[1])
		if err != nil {
			log.Println(err)
		}
		db.Put(wOpts, key, val)
	}

	// test prefix
	options := dbpkg.NewIterateOptions().WithPrefix([]byte{prefixes.HashXHistory}).WithIncludeValue(true)
	ch := dbpkg.Iter(db, options)
	var i = 0
	for kv := range ch {
		// log.Println(kv.Key)
		gotKey := kv.Key.(*prefixes.HashXHistoryKey).PackKey()

		keyPartial1 := prefixes.HashXHistoryKeyPackPartial(kv.Key.(*prefixes.HashXHistoryKey), 1)
		keyPartial2 := prefixes.HashXHistoryKeyPackPartial(kv.Key.(*prefixes.HashXHistoryKey), 2)

		// Check pack partial for sanity
		if !bytes.HasPrefix(gotKey, keyPartial1) {
			t.Errorf("%+v should be prefix of %+v\n", keyPartial1, gotKey)
		}
		if !bytes.HasPrefix(gotKey, keyPartial2) {
			t.Errorf("%+v should be prefix of %+v\n", keyPartial2, gotKey)
		}

		got := kv.Value.(*prefixes.HashXHistoryValue).PackValue()
		wantKey, err := hex.DecodeString(records[i][0])
		if err != nil {
			log.Println(err)
		}
		want, err := hex.DecodeString(records[i][1])
		if err != nil {
			log.Println(err)
		}
		if !bytes.Equal(gotKey, wantKey) {
			t.Errorf("gotKey: %+v, wantKey: %+v\n", gotKey, wantKey)
		}
		if !bytes.Equal(got, want) {
			t.Errorf("got: %+v, want: %+v\n", got, want)
		}
		i++
	}
	// Test start / stop
	start, err := hex.DecodeString(records[0][0])
	if err != nil {
		log.Println(err)
	}
	stop, err := hex.DecodeString(records[9][0])
	if err != nil {
		log.Println(err)
	}
	options2 := dbpkg.NewIterateOptions().WithStart(start).WithStop(stop).WithIncludeValue(true)
	ch2 := dbpkg.Iter(db, options2)
	i = 0
	for kv := range ch2 {
		got := kv.Value.(*prefixes.HashXHistoryValue).PackValue()
		want, err := hex.DecodeString(records[i][1])
		if err != nil {
			log.Println(err)
		}
		if !bytes.Equal(got, want) {
			t.Errorf("got: %+v, want: %+v\n", got, want)
		}
		i++
	}
}
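
// The test above can be run in isolation with, e.g.:
//
//	go test -run TestHashXHistory ./...
//
// The hashx_history.csv fixture it loads is generated by the dump-tool change
// later in this commit.
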
func TestUndo(t *testing.T) {
	filePath := "../../resources/undo.csv"


@@ -38,7 +38,7 @@ func main() {
	options := &db.IterOptions{
		FillCache:    false,
-		Prefix:       []byte{prefixes.Undo},
+		Prefix:       []byte{prefixes.HashXHistory},
		Start:        nil,
		Stop:         nil,
		IncludeStart: true,
@@ -49,7 +49,7 @@ func main() {
		RawValue: true,
	}

-	db.ReadWriteRawN(dbVal, options, "./resources/undo.csv", 2)
+	db.ReadWriteRawN(dbVal, options, "./resources/hashx_history.csv", 10)
	return
}


@@ -0,0 +1,10 @@
7800000004c2acd7268f72f4000d9775,1bc1bc01
7800000009c71d9f181747b900000000,aecabb00d8dabb00
780000006f2a6a1b5fba57d900000000,49a04a032da14a03
78000000723d80469472f4e300000000,f93c8c01ed3f8c01
78000000fc2b16dafcee68a1000c30c5,e3550301
78000001a2d46d0ed0296f3400000000,df949e018d979e01
78000002beb8caef86cbf6d300000000,4c158400db158400
78000002bf30e17c155022a000000000,d32e4c00b0a79000
78000002d3344ae355d63a6c00000000,6840f401a842f401
780000033ac11d9bc4cf100a00000000,8144fe0140bcd602