Add RepostedCount, EffectiveAmount prefix rows (#51)
* Rename prefix EffectiveAmount -> BidOrder.
* Add RepostedCount, EffectiveAmount prefix rows. Add testdata.
* Update db_get.go helpers to use EffectiveAmount, RepostedCount tables. Update tests.
parent cbdcc5faeb
commit 9d9c73f97f

10 changed files with 318 additions and 55 deletions
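Both new tables are point-lookup rows keyed by claim hash: RepostedCount ('j') holds a big-endian uint32 repost count, and EffectiveAmount ('i') holds a big-endian uint64 amount, each stored behind a one-byte prefix followed by the 20-byte claim hash. The standalone Go sketch below is not part of the diff; it simply packs one row of each by hand, using the layout from the prefixes changes further down and a claim hash and values taken from the new tests and testdata.

package main

import (
    "encoding/binary"
    "encoding/hex"
    "fmt"
)

// Sketch of the on-disk layout of the two tables added in this commit:
//   RepostedCount   ('j'): key = prefix byte + 20-byte claim hash, value = big-endian uint32
//   EffectiveAmount ('i'): key = prefix byte + 20-byte claim hash, value = big-endian uint64
func main() {
    claimHash, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bd")

    // RepostedCount row: 21-byte key, 4-byte value (repost count).
    jKey := append([]byte{'j'}, claimHash...)
    jVal := make([]byte, 4)
    binary.BigEndian.PutUint32(jVal, 5)

    // EffectiveAmount row: 21-byte key, 8-byte value (total effective amount).
    iKey := append([]byte{'i'}, claimHash...)
    iVal := make([]byte, 8)
    binary.BigEndian.PutUint64(iVal, 507171810600)

    fmt.Printf("j key=%x value=%x\n", jKey, jVal)
    fmt.Printf("i key=%x value=%x\n", iKey, iVal)
}

Its output should line up with the key and value hex in the first data rows of testdata/j_resolve.csv and testdata/i_resolve.csv at the bottom of this diff.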
db/db_get.go (58 changed lines)

@@ -212,28 +212,25 @@ func (db *ReadOnlyDBColumnFamily) GetRepost(claimHash []byte) ([]byte, error) {
 }

 func (db *ReadOnlyDBColumnFamily) GetRepostedCount(claimHash []byte) (int, error) {
-    handle, err := db.EnsureHandle(prefixes.RepostedClaim)
+    handle, err := db.EnsureHandle(prefixes.RepostedCount)
     if err != nil {
         return 0, err
     }

-    key := prefixes.NewRepostedKey(claimHash)
-    keyPrefix := key.PartialPack(1)
-    // Prefix and handle
-    options := NewIterateOptions().WithPrefix(keyPrefix).WithCfHandle(handle)
-    // Start and stop bounds
-    // options = options.WithStart(keyPrefix)
-    // Don't include the key
-    options = options.WithIncludeValue(false)
+    key := prefixes.RepostedCountKey{Prefix: []byte{prefixes.RepostedCount}, ClaimHash: claimHash}
+    rawKey := key.PackKey()

-    var i int = 0
-    ch := IterCF(db.DB, options)
-
-    for range ch {
-        i++
+    slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
+    defer slice.Free()
+    if err != nil {
+        return 0, err
+    } else if slice.Size() == 0 {
+        return 0, nil
     }

-    return i, nil
+    value := prefixes.RepostedCountValue{}
+    value.UnpackValue(slice.Data())
+    return int(value.RepostedCount), nil
 }

 func (db *ReadOnlyDBColumnFamily) GetChannelForClaim(claimHash []byte, txNum uint32, position uint16) ([]byte, error) {

@@ -286,21 +283,32 @@ func (db *ReadOnlyDBColumnFamily) GetActiveAmount(claimHash []byte, txoType uint
 }

 func (db *ReadOnlyDBColumnFamily) GetEffectiveAmount(claimHash []byte, supportOnly bool) (uint64, error) {
-    supportAmount, err := db.GetActiveAmount(claimHash, prefixes.ActivatedSupportTXOType, db.Height+1)
-    if err != nil {
-        return 0, err
-    }
-
     if supportOnly {
+        supportAmount, err := db.GetActiveAmount(claimHash, prefixes.ActivatedSupportTXOType, db.Height+1)
+        if err != nil {
+            return 0, err
+        }
         return supportAmount, nil
     }

-    activationAmount, err := db.GetActiveAmount(claimHash, prefixes.ActivateClaimTXOType, db.Height+1)
+    handle, err := db.EnsureHandle(prefixes.EffectiveAmount)
     if err != nil {
         return 0, err
     }

-    return activationAmount + supportAmount, nil
+    key := prefixes.EffectiveAmountKey{Prefix: []byte{prefixes.EffectiveAmount}, ClaimHash: claimHash}
+    rawKey := key.PackKey()
+    slice, err := db.DB.GetCF(db.Opts, handle, rawKey)
+    defer slice.Free()
+    if err != nil {
+        return 0, err
+    } else if slice.Size() == 0 {
+        return 0, nil
+    }
+
+    value := prefixes.EffectiveAmountValue{}
+    value.UnpackValue(slice.Data())
+    return value.EffectiveAmount, nil
 }

 func (db *ReadOnlyDBColumnFamily) GetSupportAmount(claimHash []byte) (uint64, error) {

@@ -519,13 +527,13 @@ func (db *ReadOnlyDBColumnFamily) GetDBState() (*prefixes.DBStateValue, error) {
     return value, nil
 }

-func (db *ReadOnlyDBColumnFamily) EffectiveAmountNameIter(normalizedName string) <-chan *prefixes.PrefixRowKV {
-    handle, err := db.EnsureHandle(prefixes.EffectiveAmount)
+func (db *ReadOnlyDBColumnFamily) BidOrderNameIter(normalizedName string) <-chan *prefixes.PrefixRowKV {
+    handle, err := db.EnsureHandle(prefixes.BidOrder)
     if err != nil {
         return nil
     }

-    key := prefixes.NewEffectiveAmountKey(normalizedName)
+    key := prefixes.NewBidOrderKey(normalizedName)
     var rawKeyPrefix []byte = nil
     rawKeyPrefix = key.PartialPack(1)
     options := NewIterateOptions().WithCfHandle(handle).WithPrefix(rawKeyPrefix)
@@ -281,15 +281,15 @@ func (db *ReadOnlyDBColumnFamily) ResolveParsedUrl(parsed *PathSegment) (*Resolv

     // Resolve by amount ordering
     log.Warn("resolving by amount ordering")
-    ch := db.EffectiveAmountNameIter(normalizedName)
+    ch := db.BidOrderNameIter(normalizedName)
     var i = 0
     for kv := range ch {
         if i+1 < amountOrder {
             i++
             continue
         }
-        key := kv.Key.(*prefixes.EffectiveAmountKey)
-        claimVal := kv.Value.(*prefixes.EffectiveAmountValue)
+        key := kv.Key.(*prefixes.BidOrderKey)
+        claimVal := kv.Value.(*prefixes.BidOrderValue)
         claimTxo, err := db.GetCachedClaimTxo(claimVal.ClaimHash, true)
         if err != nil {
             return nil, err
@@ -331,6 +331,7 @@ func TestGetDBState(t *testing.T) {
 }

 func TestGetRepostedClaim(t *testing.T) {
+    t.Skip("skipping obsolete? test of prefix W (Reposted)")
     channelHash, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bd")
     want := 5
     // Should be non-existent

@@ -363,6 +364,39 @@ func TestGetRepostedClaim(t *testing.T) {
     }
 }

+func TestGetRepostedCount(t *testing.T) {
+    channelHash, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bd")
+    want := 5
+    // Should be non-existent
+    channelHash2, _ := hex.DecodeString("2556ed1cab9d17f2a9392030a9ad7f5d138f11bf")
+    filePath := "../testdata/j_resolve.csv"
+    db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath)
+    if err != nil {
+        t.Error(err)
+    }
+    defer toDefer()
+
+    count, err := db.GetRepostedCount(channelHash)
+    if err != nil {
+        t.Error(err)
+    }
+
+    log.Println(count)
+
+    if count != want {
+        t.Errorf("Expected %d, got %d", want, count)
+    }
+
+    count2, err := db.GetRepostedCount(channelHash2)
+    if err != nil {
+        t.Error(err)
+    }
+
+    if count2 != 0 {
+        t.Errorf("Expected 0, got %d", count2)
+    }
+}
+
 func TestPrintRepost(t *testing.T) {
     filePath := "../testdata/V_resolve.csv"
     CatCSV(filePath)

@@ -536,9 +570,9 @@ func TestGetClaimToChannel(t *testing.T) {
     }
 }

-func TestGetEffectiveAmount(t *testing.T) {
+func TestGetEffectiveAmountSupportOnly(t *testing.T) {
     filePath := "../testdata/S_resolve.csv"
-    want := uint64(586370959900)
+    want := uint64(78999149300)
     claimHashStr := "2556ed1cab9d17f2a9392030a9ad7f5d138f11bd"
     claimHash, _ := hex.DecodeString(claimHashStr)
     db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath)

@@ -558,6 +592,28 @@ func TestGetEffectiveAmount(t *testing.T) {
     }
 }

+func TestGetEffectiveAmount(t *testing.T) {
+    filePath := "../testdata/i_resolve.csv"
+    want := uint64(507171810600)
+    claimHashStr := "2556ed1cab9d17f2a9392030a9ad7f5d138f11bd"
+    claimHash, _ := hex.DecodeString(claimHashStr)
+    db, _, toDefer, err := OpenAndFillTmpDBColumnFamlies(filePath)
+    if err != nil {
+        t.Error(err)
+    }
+    defer toDefer()
+    db.Height = 1116054
+
+    amount, err := db.GetEffectiveAmount(claimHash, false)
+    if err != nil {
+        t.Error(err)
+    }
+
+    if amount != want {
+        t.Errorf("Expected %d, got %d", want, amount)
+    }
+}
+
 func TestGetSupportAmount(t *testing.T) {
     want := uint64(8654754160700)
     claimHashStr := "2556ed1cab9d17f2a9392030a9ad7f5d138f11bd"
@@ -32,7 +32,7 @@ const (
     ChannelToClaim = 'J'

     ClaimShortIdPrefix = 'F'
-    EffectiveAmount = 'D'
+    BidOrder = 'D'
     ClaimExpiration = 'O'

     ClaimTakeover = 'P'

@@ -66,6 +66,9 @@ const (
     HashXStatus = 'f'
     HashXMempoolStatus = 'g'

+    EffectiveAmount = 'i'
+    RepostedCount = 'j'
+
     ActivateClaimTXOType = 1
     ActivatedSupportTXOType = 2


@@ -83,7 +86,7 @@ func GetPrefixes() [][]byte {
         {ClaimToChannel},
         {ChannelToClaim},
         {ClaimShortIdPrefix},
-        {EffectiveAmount},
+        {BidOrder},
         {ClaimExpiration},
         {ClaimTakeover},
         {PendingActivation},

@@ -111,6 +114,8 @@ func GetPrefixes() [][]byte {
         {TouchedHashX},
         {HashXStatus},
         {HashXMempoolStatus},
+        {EffectiveAmount},
+        {RepostedCount},
     }
 }


@@ -2665,7 +2670,7 @@ func ActiveAmountValueUnpack(value []byte) *ActiveAmountValue {

 type OnesComplementEffectiveAmount uint64

-type EffectiveAmountKey struct {
+type BidOrderKey struct {
     Prefix []byte `struct:"[1]byte" json:"prefix"`
     LengthEncodedNormalizedName // fields NormalizedNameLen, NormalizedName
     EffectiveAmount OnesComplementEffectiveAmount `json:"effective_amount"`

@@ -2673,18 +2678,18 @@ type EffectiveAmountKey struct {
     Position uint16 `json:"position"`
 }

-type EffectiveAmountValue struct {
+type BidOrderValue struct {
     ClaimHash []byte `struct:"[20]byte" json:"claim_hash"`
 }

-func NewEffectiveAmountKey(normalizedName string) *EffectiveAmountKey {
-    return &EffectiveAmountKey{
-        Prefix: []byte{EffectiveAmount},
+func NewBidOrderKey(normalizedName string) *BidOrderKey {
+    return &BidOrderKey{
+        Prefix: []byte{BidOrder},
         LengthEncodedNormalizedName: NewLengthEncodedNormalizedName(normalizedName),
     }
 }

-func (k *EffectiveAmountKey) PackKey() []byte {
+func (k *BidOrderKey) PackKey() []byte {
     prefixLen := 1
     // 2 byte length field, plus number of bytes in name
     nameLen := len(k.NormalizedName)

@@ -2703,7 +2708,7 @@ func (k *EffectiveAmountKey) PackKey() []byte {
     return key
 }

-func (v *EffectiveAmountValue) PackValue() []byte {
+func (v *BidOrderValue) PackValue() []byte {
     // b'>20s'
     value := make([]byte, 20)
     copy(value, v.ClaimHash[:20])

@@ -2711,11 +2716,11 @@ func (v *EffectiveAmountValue) PackValue() []byte {
     return value
 }

-func (kv *EffectiveAmountKey) NumFields() int {
+func (kv *BidOrderKey) NumFields() int {
     return 4
 }

-func (k *EffectiveAmountKey) PartialPack(fields int) []byte {
+func (k *BidOrderKey) PartialPack(fields int) []byte {
     // Limit fields between 0 and number of fields, we always at least need
     // the prefix, and we never need to iterate past the number of fields.
     nameLen := len(k.NormalizedName)

@@ -2763,10 +2768,10 @@ func (k *EffectiveAmountKey) PartialPack(fields int) []byte {
     return key
 }

-func EffectiveAmountKeyUnpack(key []byte) *EffectiveAmountKey {
+func BidOrderKeyUnpack(key []byte) *BidOrderKey {
     prefixLen := 1
     nameLen := binary.BigEndian.Uint16(key[prefixLen:])
-    return &EffectiveAmountKey{
+    return &BidOrderKey{
         Prefix: key[:prefixLen],
         LengthEncodedNormalizedName: NewLengthEncodedNormalizedName(string(key[prefixLen+2 : prefixLen+2+int(nameLen)])),
         EffectiveAmount: OnesComplementEffectiveAmount(OnesCompTwiddle64 - binary.BigEndian.Uint64(key[prefixLen+2+int(nameLen):])),

@@ -2775,8 +2780,8 @@ func EffectiveAmountKeyUnpack(key []byte) *EffectiveAmountKey {
     }
 }

-func EffectiveAmountValueUnpack(value []byte) *EffectiveAmountValue {
-    return &EffectiveAmountValue{
+func BidOrderValueUnpack(value []byte) *BidOrderValue {
+    return &BidOrderValue{
         ClaimHash: value[:20],
     }
 }

@@ -2968,6 +2973,62 @@ func RepostedValueUnpack(value []byte) *RepostedValue {
     }
 }

+type RepostedCountKey struct {
+    Prefix []byte `struct:"[1]byte" json:"prefix"`
+    ClaimHash []byte `struct:"[20]byte" json:"claim_hash"`
+}
+
+type RepostedCountValue struct {
+    RepostedCount uint32 `json:"reposted_count"`
+}
+
+func (kv *RepostedCountKey) NumFields() int {
+    return 1
+}
+
+func (kv *RepostedCountKey) PartialPack(fields int) []byte {
+    // b'>20s'
+    n := len(kv.Prefix) + 20
+    buf := make([]byte, n)
+    offset := 0
+    offset += copy(buf[offset:], kv.Prefix[:1])
+    if fields <= 0 {
+        return buf[:offset]
+    }
+    offset += copy(buf[offset:], kv.ClaimHash[:20])
+    return buf[:offset]
+}
+
+func (kv *RepostedCountKey) PackKey() []byte {
+    return kv.PartialPack(kv.NumFields())
+}
+
+func (kv *RepostedCountKey) UnpackKey(buf []byte) {
+    // b'>20s'
+    offset := 0
+    kv.Prefix = buf[offset : offset+1]
+    offset += 1
+    kv.ClaimHash = buf[offset : offset+20]
+    offset += 20
+}
+
+func (kv *RepostedCountValue) PackValue() []byte {
+    // b'>L'
+    n := 4
+    buf := make([]byte, n)
+    offset := 0
+    binary.BigEndian.PutUint32(buf[offset:], kv.RepostedCount)
+    offset += 4
+    return buf[:offset]
+}
+
+func (kv *RepostedCountValue) UnpackValue(buf []byte) {
+    // b'>L'
+    offset := 0
+    kv.RepostedCount = binary.BigEndian.Uint32(buf[offset:])
+    offset += 4
+}
+
 type TouchedOrDeletedClaimKey struct {
     Prefix []byte `struct:"[1]byte" json:"prefix"`
     Height int32 `json:"height"`

@@ -3462,6 +3523,62 @@ func (kv *HashXStatusValue) UnpackValue(buf []byte) {
 type HashXMempoolStatusKey = HashXStatusKey
 type HashXMempoolStatusValue = HashXStatusValue

+type EffectiveAmountKey struct {
+    Prefix []byte `struct:"[1]byte" json:"prefix"`
+    ClaimHash []byte `struct:"[20]byte" json:"claim_hash"`
+}
+
+type EffectiveAmountValue struct {
+    EffectiveAmount uint64 `json:"effective_amount"`
+}
+
+func (kv *EffectiveAmountKey) NumFields() int {
+    return 1
+}
+
+func (kv *EffectiveAmountKey) PartialPack(fields int) []byte {
+    // b'>20s'
+    n := len(kv.Prefix) + 20
+    buf := make([]byte, n)
+    offset := 0
+    offset += copy(buf[offset:], kv.Prefix[:1])
+    if fields <= 0 {
+        return buf[:offset]
+    }
+    offset += copy(buf[offset:], kv.ClaimHash[:20])
+    return buf[:offset]
+}
+
+func (kv *EffectiveAmountKey) PackKey() []byte {
+    return kv.PartialPack(kv.NumFields())
+}
+
+func (kv *EffectiveAmountKey) UnpackKey(buf []byte) {
+    // b'>20s'
+    offset := 0
+    kv.Prefix = buf[offset : offset+1]
+    offset += 1
+    kv.ClaimHash = buf[offset : offset+20]
+    offset += 20
+}
+
+func (kv *EffectiveAmountValue) PackValue() []byte {
+    // b'>Q'
+    n := 8
+    buf := make([]byte, n)
+    offset := 0
+    binary.BigEndian.PutUint64(buf[offset:], kv.EffectiveAmount)
+    offset += 8
+    return buf[:offset]
+}
+
+func (kv *EffectiveAmountValue) UnpackValue(buf []byte) {
+    // b'>Q'
+    offset := 0
+    kv.EffectiveAmount = binary.BigEndian.Uint64(buf[offset:])
+    offset += 8
+}
+
 func UnpackGenericKey(key []byte) (BaseKey, error) {
     if len(key) == 0 {
         return nil, fmt.Errorf("key length zero")

@@ -3651,18 +3768,18 @@ var prefixRegistry = map[byte]prefixMeta{
             return ClaimShortIDValueUnpack(buf)
         },
     },
-    EffectiveAmount: {
+    BidOrder: {
         newKey: func() interface{} {
-            return &EffectiveAmountKey{Prefix: []byte{EffectiveAmount}}
+            return &BidOrderKey{Prefix: []byte{BidOrder}}
         },
         newValue: func() interface{} {
-            return &EffectiveAmountValue{}
+            return &BidOrderValue{}
         },
         newKeyUnpack: func(buf []byte) interface{} {
-            return EffectiveAmountKeyUnpack(buf)
+            return BidOrderKeyUnpack(buf)
         },
         newValueUnpack: func(buf []byte) interface{} {
-            return EffectiveAmountValueUnpack(buf)
+            return BidOrderValueUnpack(buf)
         },
     },
     ClaimExpiration: {

@@ -4018,4 +4135,20 @@ var prefixRegistry = map[byte]prefixMeta{
             return &HashXMempoolStatusValue{}
         },
     },
+    RepostedCount: {
+        newKey: func() interface{} {
+            return &RepostedCountKey{Prefix: []byte{RepostedCount}}
+        },
+        newValue: func() interface{} {
+            return &RepostedCountValue{}
+        },
+    },
+    EffectiveAmount: {
+        newKey: func() interface{} {
+            return &EffectiveAmountKey{Prefix: []byte{EffectiveAmount}}
+        },
+        newValue: func() interface{} {
+            return &EffectiveAmountValue{}
+        },
+    },
 }
@@ -311,9 +311,9 @@ func TestActiveAmount(t *testing.T) {
     testGeneric(filePath, prefixes.ActiveAmount, 5)(t)
 }

-func TestEffectiveAmount(t *testing.T) {
-    filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.EffectiveAmount)
-    testGeneric(filePath, prefixes.EffectiveAmount, 4)(t)
+func TestBidOrder(t *testing.T) {
+    filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.BidOrder)
+    testGeneric(filePath, prefixes.BidOrder, 4)(t)
 }

 func TestRepost(t *testing.T) {

@@ -326,6 +326,14 @@ func TestRepostedClaim(t *testing.T) {
     testGeneric(filePath, prefixes.RepostedClaim, 3)(t)
 }

+func TestRepostedCount(t *testing.T) {
+    prefix := byte(prefixes.RepostedCount)
+    filePath := fmt.Sprintf("../../testdata/%c.csv", prefix)
+    //synthesizeTestData([]byte{prefix}, filePath, []int{20}, []int{4}, [][3]int{})
+    key := &prefixes.RepostedCountKey{}
+    testGeneric(filePath, prefix, key.NumFields())(t)
+}
+
 func TestClaimDiff(t *testing.T) {
     filePath := fmt.Sprintf("../../testdata/%c.csv", prefixes.ClaimDiff)
     testGeneric(filePath, prefixes.ClaimDiff, 1)(t)

@@ -418,6 +426,14 @@ func TestHashXMempoolStatus(t *testing.T) {
     testGeneric(filePath, prefix, key.NumFields())(t)
 }

+func TestEffectiveAmount(t *testing.T) {
+    prefix := byte(prefixes.EffectiveAmount)
+    filePath := fmt.Sprintf("../../testdata/%c.csv", prefix)
+    //synthesizeTestData([]byte{prefix}, filePath, []int{20}, []int{8}, [][3]int{})
+    key := &prefixes.EffectiveAmountKey{}
+    testGeneric(filePath, prefix, key.NumFields())(t)
+}
+
 func synthesizeTestData(prefix []byte, filePath string, keyFixed, valFixed []int, valVariable [][3]int) {
     file, err := os.OpenFile(filePath, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644)
     if err != nil {
testdata/S_resolve.csv (vendored, 4 changed lines)

@@ -1,6 +1,6 @@
 S,,
-S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a6b67006286030000,0000007615cbad28
-S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a706a0063105c0000,000000000bebc200
+S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd01000a6b67006286030000,0000007615cbad28
+S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd01000a706a0063105c0000,000000000bebc200
 S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a73ea006367550000,0000000005f5e100
 S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a7d63006469750000,0000000db0b7c894
 S,532556ed1cab9d17f2a9392030a9ad7f5d138f11bd02000a7ebf00648c480000,00000000b2d05e00
testdata/i.csv (new vendored file, 21 added lines)

@@ -0,0 +1,21 @@
+i,
+6903dc9970183b8a05d118c84a37c355fe34d95d01,0e74dd23295a4610
+6916bd29750d8d92b32677eda07e10af313c0019d9,ff0579207ec6e594
+6926bdfcb4a1ad81f460ad561283584695cd6cea59,b834b13a8918262f
+6930776827481ec15fa07e0dc266e376846467237d,4bf0a5127a1216dc
+6955a1eaf08f9468a6c3565fe16b2ae4b726045538,e32029de8b58dd6e
+69614fa6bc0cea1366377456bc88dda9ec7b6d4c3c,55bf2d8e0e262697
+6971e7b039dde5797ae167b9783f87c4a67711799d,9197b827b560fc34
+697765a71d8a4082d056baaae83f8b4af1e124f5e9,62c0d5dfde7ef884
+6993e121642c01e2edca50d565ff3df1a656e83ebd,1796c74886d45045
+69af0684076bc64adcbcd621de9c09fd15dade3e17,f9240ab9a9650d9f
+69b70fdcc95d3b51ec28872a143c7a6fc947e6a58e,a8950968d95759a9
+69bdb90916f78badec506a895b0edceb47533297f9,331c0ca597601ed7
+69c276b7070ba79e75c82e4d97a70e4428dd3be058,85c61c842e5bfe7f
+69cb8215b0c9440227a9e7e76bce468bdb4fa0f714,9c42e1ba41275362
+69d2556fe7b8fce36a71c78e0001452298903ef81b,f61cf52e7e645bf8
+69d2677078f1448c0e4295711e61a85a9fb6a280d1,28d57b45b1700cb3
+69dd979490444cab3efc1e517f83369df6e2e279a3,dad1b5133cc15dd4
+69e6e6834cf0da132c77af275cbab9885cbbc3f022,df55f9fd0278ca71
+69ea25444b8d0ab22ac355f4b692b0d7006b672f4a,0703f38a1428ff8e
+69fcb79c5e463ac4a0e078f9cd24e2c718b66c40d6,5be7b71b6dca5a20
testdata/i_resolve.csv (new vendored file, 4 added lines)

@@ -0,0 +1,4 @@
+i,,
+i,692556ed1cab9d17f2a9392030a9ad7f5d138f11bd,0000007615cbad28
+i,692556ed1cab9d17f2a9392030a9ad7f5d138faf01,000000000bebc200
+i,692556ed1cab9d17f2a9392030a9ad7f5d138fb074,0000000005f5e100
testdata/j.csv (new vendored file, 21 added lines)

@@ -0,0 +1,21 @@
+j,
+6a2bb6a2e0505748602cb9a194ba8ea4abb6935407,cc786896
+6a44f45448398072520cd2415044dc3fbfc4f77d94,b5d69fdb
+6a464f73e50c5ac613e29959eaf7862989381fd2d7,f4a3151d
+6a615c78bcea987123689221ec5546f4555c7ddf4d,02e0ca23
+6a86f3151c381d0e7061583051ea2de133976cab73,b1f56fd8
+6a875a5f2579fce1aed7b452dbcfb982161d9d35ad,fbe72e11
+6a8edc85a5a8aa78fd6a7f0a9e3755121238ae5dcb,2f3ec916
+6a90efc239731fa0b83c2a386c1426e8768ceb2123,6b8b1649
+6a951540c279d1286d7800d205aea75f514b9e8fdb,e78656c9
+6aa687dae05e6d629d5056e1af651519dfc669f40c,07665a81
+6abaa8f75ae7182dfa70b293317acd3aaa8d021b5f,f51abc2b
+6abc6bcaf274827e976bfa8ee5801d24c4b37bb77b,d171f0fe
+6ac5717e0820d8bcf758690666a7dff87850e58af1,afbe5e50
+6ac6cfb7ee16de9c7f6498939558881ffa346f0918,00a40c49
+6ad24f0b126ae7bcdfb70f51b3ade58bbfd22dc94c,739a1ba9
+6ad89bcd32e80b4b89b6ac066d87e1a1356d7d5e4e,5605f288
+6ae49f7dcc373786b526e4393ff46d300ee5f4a9dd,dfe41d24
+6aedfe781fb0ce858856eff6aacc2206525545e476,59508a47
+6aeffec292f14000b7c073b861f2ad83c5511a2df8,afe94781
+6afbdd9ec076dbf81511264f00f021c9667e52cb67,51ffc92a
testdata/j_resolve.csv (new vendored file, 4 added lines)

@@ -0,0 +1,4 @@
+j,,
+j,6a2556ed1cab9d17f2a9392030a9ad7f5d138f11bd,00000005
+j,6a255761310145baa958b5587d9b5571423e5a0d3c,00000005
+j,6a255761310145baa958b5587d9b5571423f00c85b,0000000a
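Tying the fixtures back to the tests above: each data row in the *_resolve.csv files appears to be <prefix letter>,<full key hex>,<value hex>, with the key hex starting with the prefix byte itself. The rough standalone sketch below (assuming that column layout and the big-endian uint32 encoding used by RepostedCountValue) decodes the first j_resolve.csv row into the count that TestGetRepostedCount expects.

package main

import (
    "encoding/binary"
    "encoding/hex"
    "fmt"
    "strings"
)

func main() {
    // First data row of testdata/j_resolve.csv above.
    row := "j,6a2556ed1cab9d17f2a9392030a9ad7f5d138f11bd,00000005"
    fields := strings.Split(row, ",")

    key, _ := hex.DecodeString(fields[1])   // prefix byte 'j' (0x6a) + 20-byte claim hash
    value, _ := hex.DecodeString(fields[2]) // big-endian uint32 repost count

    claimHash := key[1:21]
    count := binary.BigEndian.Uint32(value)

    // Prints the 20-byte claim hash and count 5, the value TestGetRepostedCount asserts.
    fmt.Printf("claim %x reposted %d times\n", claimHash, count)
}

The same decoding with binary.BigEndian.Uint64 on the first i_resolve.csv row gives 507171810600, the amount TestGetEffectiveAmount asserts.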