This commit is contained in:
Jeffrey Picard 2022-01-14 20:42:57 -05:00
parent 5ec2eba2c1
commit 9311f859c8
4 changed files with 164 additions and 4 deletions

View file

@ -63,6 +63,88 @@ type PrefixRowKV struct {
Value interface{} Value interface{}
} }
// UndoKey is the key for a row in the Undo column family: a one-byte
// table prefix followed by the block height the undo data applies to.
type UndoKey struct {
	Prefix []byte `json:"prefix"`
	Height uint64 `json:"height"`
}
// UndoValue holds the raw serialized undo data stored for a block height.
type UndoValue struct {
	Data []byte `json:"data"`
}
// PackKey serializes the UndoKey into its on-disk form: a one-byte
// table prefix followed by the big-endian height (Python struct b'>L').
func (k *UndoKey) PackKey() []byte {
	const prefixLen = 1
	out := make([]byte, prefixLen+8)
	copy(out, k.Prefix)
	binary.BigEndian.PutUint64(out[prefixLen:], k.Height)
	return out
}
// PackValue serializes the UndoValue by returning a copy of its raw
// undo-data bytes, so the caller cannot mutate the stored slice.
func (v *UndoValue) PackValue() []byte {
	// Fix: the original named a local variable `len`, shadowing the
	// builtin len() for the rest of the function body.
	value := make([]byte, len(v.Data))
	copy(value, v.Data)
	return value
}
// UndoKeyPackPartialNFields returns a packer closed over nFields, so it
// can be used where a one-argument key-packing function is expected.
func UndoKeyPackPartialNFields(nFields int) func(*UndoKey) []byte {
	return func(k *UndoKey) []byte {
		return UndoKeyPackPartial(k, nFields)
	}
}
// UndoKeyPackPartial serializes the first nFields fields of the key.
// The full layout is a one-byte table prefix followed by the big-endian
// height (Python struct b'>L'), so nFields selects how much of that
// layout to emit: 0 packs only the prefix, 1 packs prefix+height.
func UndoKeyPackPartial(k *UndoKey, nFields int) []byte {
	// Limit nFields between 0 and the number of fields: we always at
	// least need the prefix, and Height is the only field after it.
	if nFields > 1 {
		nFields = 1
	}
	if nFields < 0 {
		nFields = 0
	}
	// Fix: the original comment said b'>4sLH', copy-pasted from a
	// different key type; this key's format is b'>L'. The original
	// loop-over-switch also reduced to these two conditionals, since
	// there is exactly one optional field.
	prefixLen := 1
	n := prefixLen
	if nFields >= 1 {
		n += 8
	}
	key := make([]byte, n)
	copy(key, k.Prefix)
	if nFields >= 1 {
		binary.BigEndian.PutUint64(key[prefixLen:], k.Height)
	}
	return key
}
// UndoKeyUnpack deserializes a packed undo key (one-byte prefix plus
// big-endian height) back into its struct form. The Prefix field
// aliases the input slice rather than copying it.
func UndoKeyUnpack(key []byte) *UndoKey {
	const prefixLen = 1
	prefix, rest := key[:prefixLen], key[prefixLen:]
	return &UndoKey{
		Prefix: prefix,
		Height: binary.BigEndian.Uint64(rest),
	}
}
// UndoValueUnpack deserializes a stored undo value. Note that Data
// aliases the input slice (no copy), unlike PackValue which copies —
// NOTE(review): presumably safe because iterators hand out fresh
// slices; confirm callers do not reuse the buffer.
func UndoValueUnpack(value []byte) *UndoValue {
	return &UndoValue{
		Data: value,
	}
}
type UTXOKey struct { type UTXOKey struct {
Prefix []byte `json:"prefix"` Prefix []byte `json:"prefix"`
HashX []byte `json:"hashx"` HashX []byte `json:"hashx"`
@ -2661,7 +2743,7 @@ func UnpackGenericKey(key []byte) (byte, interface{}, error) {
return RepostedClaim, RepostedKeyUnpack(key), nil return RepostedClaim, RepostedKeyUnpack(key), nil
case Undo: case Undo:
return 0x0, nil, errors.Base("key unpack function for %v not implemented", firstByte) return Undo, UndoKeyUnpack(key), nil
case ClaimDiff: case ClaimDiff:
return ClaimDiff, TouchedOrDeletedClaimKeyUnpack(key), nil return ClaimDiff, TouchedOrDeletedClaimKeyUnpack(key), nil
@ -2736,7 +2818,7 @@ func UnpackGenericValue(key, value []byte) (byte, interface{}, error) {
return RepostedClaim, RepostedValueUnpack(value), nil return RepostedClaim, RepostedValueUnpack(value), nil
case Undo: case Undo:
return 0x0, nil, errors.Base("value unpack not implemented for key %v", key) return Undo, UndoValueUnpack(value), nil
case ClaimDiff: case ClaimDiff:
return ClaimDiff, TouchedOrDeletedClaimValueUnpack(value), nil return ClaimDiff, TouchedOrDeletedClaimValueUnpack(value), nil

View file

@ -44,6 +44,82 @@ func testInit(filePath string) (*grocksdb.DB, [][]string, func()) {
return db, records, toDefer return db, records, toDefer
} }
// TestUndo loads the undo-prefix fixture rows into a fresh database and
// verifies that (1) prefix iteration round-trips every key and value
// through pack/unpack, (2) partial key packing produces a true prefix of
// the full key, and (3) start/stop-bounded iteration returns the
// expected rows in order.
func TestUndo(t *testing.T) {
	filePath := "../../resources/undo.csv"
	wOpts := grocksdb.NewDefaultWriteOptions()
	db, records, toDefer := testInit(filePath)
	defer toDefer()
	for _, record := range records {
		key, err := hex.DecodeString(record[0])
		if err != nil {
			log.Println(err)
		}
		val, err := hex.DecodeString(record[1])
		if err != nil {
			log.Println(err)
		}
		// Fix: the Put error was silently discarded.
		if err := db.Put(wOpts, key, val); err != nil {
			log.Println(err)
		}
	}
	// test prefix
	options := dbpkg.NewIterateOptions().WithPrefix([]byte{prefixes.Undo}).WithIncludeValue(true)
	ch := dbpkg.Iter(db, options)
	var i = 0
	for kv := range ch {
		gotKey := kv.Key.(*prefixes.UndoKey).PackKey()
		keyPartial1 := prefixes.UndoKeyPackPartial(kv.Key.(*prefixes.UndoKey), 1)
		// Check pack partial for sanity
		if !bytes.HasPrefix(gotKey, keyPartial1) {
			t.Errorf("%+v should be prefix of %+v\n", keyPartial1, gotKey)
		}
		got := kv.Value.(*prefixes.UndoValue).PackValue()
		wantKey, err := hex.DecodeString(records[i][0])
		if err != nil {
			log.Println(err)
		}
		want, err := hex.DecodeString(records[i][1])
		if err != nil {
			log.Println(err)
		}
		if !bytes.Equal(gotKey, wantKey) {
			// Fix: this message previously printed got/want (the
			// values) instead of the mismatched keys.
			t.Errorf("gotKey: %+v, wantKey: %+v\n", gotKey, wantKey)
		}
		if !bytes.Equal(got, want) {
			t.Errorf("got: %+v, want: %+v\n", got, want)
		}
		i++
	}
	// Test start / stop
	start, err := hex.DecodeString(records[0][0])
	if err != nil {
		log.Println(err)
	}
	stop, err := hex.DecodeString(records[1][0])
	if err != nil {
		log.Println(err)
	}
	options2 := dbpkg.NewIterateOptions().WithStart(start).WithStop(stop).WithIncludeValue(true)
	ch2 := dbpkg.Iter(db, options2)
	i = 0
	for kv := range ch2 {
		got := kv.Value.(*prefixes.UndoValue).PackValue()
		want, err := hex.DecodeString(records[i][1])
		if err != nil {
			log.Println(err)
		}
		if !bytes.Equal(got, want) {
			t.Errorf("got: %+v, want: %+v\n", got, want)
		}
		i++
	}
}
func TestBlockHash(t *testing.T) { func TestBlockHash(t *testing.T) {
filePath := "../../resources/block_hash.csv" filePath := "../../resources/block_hash.csv"

View file

@ -38,7 +38,7 @@ func main() {
options := &db.IterOptions{ options := &db.IterOptions{
FillCache: false, FillCache: false,
Prefix: []byte{prefixes.BlockHash}, Prefix: []byte{prefixes.Undo},
Start: nil, Start: nil,
Stop: nil, Stop: nil,
IncludeStart: true, IncludeStart: true,
@ -49,7 +49,7 @@ func main() {
RawValue: true, RawValue: true,
} }
db.ReadWriteRawN(dbVal, options, "./resources/block_hash.csv", 10) db.ReadWriteRawN(dbVal, options, "./resources/undo.csv", 2)
return return
} }

2
resources/undo.csv Normal file

File diff suppressed because one or more lines are too long