rest of the keys
This commit is contained in:
parent 78931448e6
commit 6d126e82df
10 changed files with 781 additions and 90 deletions
@@ -55,7 +55,8 @@ const (
 	ACTIVATED_CLAIM_TXO_TYPE = 1
 	ACTIVATED_SUPPORT_TXO_TYPE = 2
 
-	OnesCompTwiddle uint64 = 0xffffffffffffffff
+	OnesCompTwiddle64 uint64 = 0xffffffffffffffff
+	OnesCompTwiddle32 uint32 = 0xffffffff
 )
 
 type PrefixRowKV struct {
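Both twiddle constants support the same trick: the key/value store compares keys as raw bytes (RocksDB's default bytewise comparator), so packing `OnesCompTwiddle - value` big-endian makes larger values sort first. A minimal, self-contained sketch of the effect, not part of the commit and with made-up values:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

func main() {
    const OnesCompTwiddle32 uint32 = 0xffffffff
    big, small := make([]byte, 4), make([]byte, 4)
    binary.BigEndian.PutUint32(big, OnesCompTwiddle32-uint32(10)) // larger logical value
    binary.BigEndian.PutUint32(small, OnesCompTwiddle32-uint32(3))
    // The larger value encodes to the byte-wise smaller key, so it iterates first.
    fmt.Println(bytes.Compare(big, small) < 0) // true
}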
@@ -63,6 +64,137 @@ type PrefixRowKV struct {
 	Value interface{}
 }
 
+/*
+class DBState(typing.NamedTuple):
+    genesis: bytes
+    height: int
+    tx_count: int
+    tip: bytes
+    utxo_flush_count: int
+    wall_time: int
+    first_sync: bool
+    db_version: int
+    hist_flush_count: int
+    comp_flush_count: int
+    comp_cursor: int
+    es_sync_height: int
+
+*/
+
+type DBStateKey struct {
+	Prefix []byte `json:"prefix"`
+}
+
+type DBStateValue struct {
+	Genesis        []byte
+	Height         uint32
+	TxCount        uint32
+	Tip            []byte
+	UtxoFlushCount uint32
+	WallTime       uint32
+	FirstSync      bool
+	DDVersion      uint8
+	HistFlushCount int32
+	CompFlushCount int32
+	CompCursor     int32
+	EsSyncHeight   uint32
+}
+
+func (k *DBStateKey) PackKey() []byte {
+	prefixLen := 1
+	n := prefixLen
+	key := make([]byte, n)
+	copy(key, k.Prefix)
+
+	return key
+}
+
+func (v *DBStateValue) PackValue() []byte {
+	// b'>32sLL32sLLBBlllL'
+	n := 32 + 4 + 4 + 32 + 4 + 4 + 1 + 1 + 4 + 4 + 4 + 4
+	value := make([]byte, n)
+	copy(value, v.Genesis[:32])
+	binary.BigEndian.PutUint32(value[32:], v.Height)
+	binary.BigEndian.PutUint32(value[32+4:], v.TxCount)
+	copy(value[32+4+4:], v.Tip[:32])
+	binary.BigEndian.PutUint32(value[32+4+4+32:], v.UtxoFlushCount)
+	binary.BigEndian.PutUint32(value[32+4+4+32+4:], v.WallTime)
+	var bitSetVar uint8
+	if v.FirstSync {
+		bitSetVar = 1
+	}
+	value[32+4+4+32+4+4] = bitSetVar
+	value[32+4+4+32+4+4+1] = v.DDVersion
+	var histFlushCount uint32
+	var compFlushCount uint32
+	var compCursor uint32
+	histFlushCount = (OnesCompTwiddle32 - uint32(v.HistFlushCount))
+	compFlushCount = (OnesCompTwiddle32 - uint32(v.CompFlushCount))
+	compCursor = (OnesCompTwiddle32 - uint32(v.CompCursor))
+	// if v.HistFlushCount < 0 {
+	// }
+	// if v.CompFlushCount < 0 {
+	// }
+	// if v.CompCursor < 0 {
+	// }
+	binary.BigEndian.PutUint32(value[32+4+4+32+4+4+1+1:], histFlushCount)
+	binary.BigEndian.PutUint32(value[32+4+4+32+4+4+1+1+4:], compFlushCount)
+	binary.BigEndian.PutUint32(value[32+4+4+32+4+4+1+1+4+4:], compCursor)
+	binary.BigEndian.PutUint32(value[32+4+4+32+4+4+1+1+4+4+4:], v.EsSyncHeight)
+	log.Printf("%+v\n", v)
+
+	return value
+}
+
+func DBStateKeyPackPartialKey(key *DBStateKey) func(int) []byte {
+	return func(nFields int) []byte {
+		return DBStateKeyPackPartial(key, nFields)
+	}
+}
+
+func DBStateKeyPackPartialNFields(nFields int) func(*DBStateKey) []byte {
+	return func(u *DBStateKey) []byte {
+		return DBStateKeyPackPartial(u, nFields)
+	}
+}
+
+func DBStateKeyPackPartial(k *DBStateKey, nFields int) []byte {
+	prefixLen := 1
+	var n = prefixLen
+
+	key := make([]byte, n)
+	copy(key, k.Prefix)
+
+	return key
+}
+
+func DBStateKeyUnpack(key []byte) *DBStateKey {
+	prefixLen := 1
+	return &DBStateKey{
+		Prefix: key[:prefixLen],
+	}
+}
+
+func DBStateValueUnpack(value []byte) *DBStateValue {
+	x := &DBStateValue{
+		Genesis:        value[:32],
+		Height:         binary.BigEndian.Uint32(value[32:]),
+		TxCount:        binary.BigEndian.Uint32(value[32+4:]),
+		Tip:            value[32+4+4 : 32+4+4+32],
+		UtxoFlushCount: binary.BigEndian.Uint32(value[32+4+4+32:]),
+		WallTime:       binary.BigEndian.Uint32(value[32+4+4+32+4:]),
+		FirstSync:      value[32+4+4+32+4+4] == 1,
+		DDVersion:      value[32+4+4+32+4+4+1],
+		HistFlushCount: int32(^binary.BigEndian.Uint32(value[32+4+4+32+4+4+1+1:])),
+		CompFlushCount: int32(^binary.BigEndian.Uint32(value[32+4+4+32+4+4+1+1+4:])),
+		CompCursor:     int32(^binary.BigEndian.Uint32(value[32+4+4+32+4+4+1+1+4+4:])),
+		EsSyncHeight:   binary.BigEndian.Uint32(value[32+4+4+32+4+4+1+1+4+4+4:]),
+	}
+	log.Printf("%+v\n", x)
+	return x
+}
+
 type UndoKey struct {
 	Prefix []byte `json:"prefix"`
 	Height uint64 `json:"height"`
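A hedged sketch (not part of the commit) of the round trip through the new PackValue/DBStateValueUnpack pair, assumed to sit in the same package so `log` is already imported; the field values are invented:

// Sketch: round-trip a DBStateValue through PackValue / DBStateValueUnpack.
func exampleDBStateRoundTrip() {
    v := &DBStateValue{
        Genesis:        make([]byte, 32), // zeroed 32-byte genesis hash, for illustration
        Height:         1000000,
        TxCount:        2500000,
        Tip:            make([]byte, 32),
        UtxoFlushCount: 7,
        WallTime:       123456,
        FirstSync:      true,
        DDVersion:      7,
        HistFlushCount: 3,
        CompFlushCount: -1, // negatives survive the OnesCompTwiddle32 encoding
        CompCursor:     -1,
        EsSyncHeight:   1000000,
    }
    packed := v.PackValue() // 98 bytes: 32+4+4+32+4+4+1+1+4+4+4+4, matching b'>32sLL32sLLBBlllL'
    back := DBStateValueUnpack(packed)
    log.Println(back.Height == v.Height, back.CompCursor == v.CompCursor) // true true
}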
@@ -534,7 +666,98 @@ type BlockTxsKey struct {
 }
 
 type BlockTxsValue struct {
-	TxHashes []byte `json:"tx_hashes"`
+	TxHashes [][]byte `json:"tx_hashes"`
 }
 
+func (k *BlockTxsKey) PackKey() []byte {
+	prefixLen := 1
+	// b'>L'
+	n := prefixLen + 4
+	key := make([]byte, n)
+	copy(key, k.Prefix)
+	binary.BigEndian.PutUint32(key[prefixLen:], k.Height)
+
+	return key
+}
+
+func (v *BlockTxsValue) PackValue() []byte {
+	numHashes := len(v.TxHashes)
+	n := numHashes * 32
+	value := make([]byte, n)
+
+	for i, tx := range v.TxHashes {
+		if len(tx) != 32 {
+			log.Println("Warning, txhash not 32 bytes", tx)
+			return nil
+		}
+		copy(value[i*32:], tx)
+	}
+
+	return value
+}
+
+func BlockTxsKeyPackPartialKey(key *BlockTxsKey) func(int) []byte {
+	return func(nFields int) []byte {
+		return BlockTxsKeyPackPartial(key, nFields)
+	}
+}
+
+func BlockTxsKeyPackPartialNFields(nFields int) func(*BlockTxsKey) []byte {
+	return func(u *BlockTxsKey) []byte {
+		return BlockTxsKeyPackPartial(u, nFields)
+	}
+}
+
+func BlockTxsKeyPackPartial(k *BlockTxsKey, nFields int) []byte {
+	// Limit nFields between 0 and number of fields, we always at least need
+	// the prefix, and we never need to iterate past the number of fields.
+	if nFields > 1 {
+		nFields = 1
+	}
+	if nFields < 0 {
+		nFields = 0
+	}
+
+	prefixLen := 1
+	var n = prefixLen
+	for i := 0; i <= nFields; i++ {
+		switch i {
+		case 1:
+			n += 4
+		}
+	}
+
+	key := make([]byte, n)
+
+	for i := 0; i <= nFields; i++ {
+		switch i {
+		case 0:
+			copy(key, k.Prefix)
+		case 1:
+			binary.BigEndian.PutUint32(key[prefixLen:], k.Height)
+		}
+	}
+
+	return key
+}
+
+func BlockTxsKeyUnpack(key []byte) *BlockTxsKey {
+	prefixLen := 1
+	return &BlockTxsKey{
+		Prefix: key[:prefixLen],
+		Height: binary.BigEndian.Uint32(key[prefixLen:]),
+	}
+}
+
+func BlockTxsValueUnpack(value []byte) *BlockTxsValue {
+	numHashes := len(value) / 32
+	txs := make([][]byte, numHashes)
+	for i := 0; i < numHashes; i++ {
+		txs[i] = value[i*32 : (i+1)*32]
+	}
+	return &BlockTxsValue{
+		TxHashes: txs,
+	}
+}
+
 /*
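The BlockTxsValue change above switches the value from a flat byte slice to a list of 32-byte tx hashes packed back to back. A small hedged sketch of the round trip, assumed to live alongside the code above with `bytes` and `log` imported; the hash bytes are made up:

func exampleBlockTxsRoundTrip() {
    h1 := bytes.Repeat([]byte{0x11}, 32) // fake 32-byte tx hashes
    h2 := bytes.Repeat([]byte{0x22}, 32)
    v := &BlockTxsValue{TxHashes: [][]byte{h1, h2}}
    packed := v.PackValue()             // 64 bytes: hashes concatenated in order
    back := BlockTxsValueUnpack(packed) // splits the value every 32 bytes
    log.Println(len(back.TxHashes), bytes.Equal(back.TxHashes[1], h2)) // 2 true
}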
@@ -555,6 +778,83 @@ type TxCountValue struct {
 	TxCount uint32 `json:"tx_count"`
 }
 
+func (k *TxCountKey) PackKey() []byte {
+	prefixLen := 1
+	// b'>L'
+	n := prefixLen + 4
+	key := make([]byte, n)
+	copy(key, k.Prefix)
+	binary.BigEndian.PutUint32(key[prefixLen:], k.Height)
+
+	return key
+}
+
+func (v *TxCountValue) PackValue() []byte {
+	value := make([]byte, 4)
+	binary.BigEndian.PutUint32(value, v.TxCount)
+
+	return value
+}
+
+func TxCountKeyPackPartialKey(key *TxCountKey) func(int) []byte {
+	return func(nFields int) []byte {
+		return TxCountKeyPackPartial(key, nFields)
+	}
+}
+
+func TxCountKeyPackPartialNFields(nFields int) func(*TxCountKey) []byte {
+	return func(u *TxCountKey) []byte {
+		return TxCountKeyPackPartial(u, nFields)
+	}
+}
+
+func TxCountKeyPackPartial(k *TxCountKey, nFields int) []byte {
+	// Limit nFields between 0 and number of fields, we always at least need
+	// the prefix, and we never need to iterate past the number of fields.
+	if nFields > 1 {
+		nFields = 1
+	}
+	if nFields < 0 {
+		nFields = 0
+	}
+
+	prefixLen := 1
+	var n = prefixLen
+	for i := 0; i <= nFields; i++ {
+		switch i {
+		case 1:
+			n += 4
+		}
+	}
+
+	key := make([]byte, n)
+
+	for i := 0; i <= nFields; i++ {
+		switch i {
+		case 0:
+			copy(key, k.Prefix)
+		case 1:
+			binary.BigEndian.PutUint32(key[prefixLen:], k.Height)
+		}
+	}
+
+	return key
+}
+
+func TxCountKeyUnpack(key []byte) *TxCountKey {
+	prefixLen := 1
+	return &TxCountKey{
+		Prefix: key[:prefixLen],
+		Height: binary.BigEndian.Uint32(key[prefixLen:]),
+	}
+}
+
+func TxCountValueUnpack(value []byte) *TxCountValue {
+	return &TxCountValue{
+		TxCount: binary.BigEndian.Uint32(value),
+	}
+}
+
 /*
 class TxHashKey(NamedTuple):
     tx_num: int
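TxCountKeyPackPartial above builds a key out of only the first nFields fields, which is presumably the key the iterator options (WithPrefix/WithStart in the tests) are built from for range scans. A hedged sketch of the difference between nFields 0 and 1, assumed to live alongside the code above; 0x54 is the prefix byte seen in resources/tx_count.csv:

func exampleTxCountPartialKeys() {
    k := &TxCountKey{Prefix: []byte{0x54}, Height: 9}
    full := k.PackKey()                       // 5 bytes: prefix + big-endian height
    prefixOnly := TxCountKeyPackPartial(k, 0) // 1 byte: just the table prefix
    withHeight := TxCountKeyPackPartial(k, 1) // same bytes as PackKey()
    log.Printf("%x %x %x\n", full, prefixOnly, withHeight) // 5400000009 54 5400000009
}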
@@ -576,6 +876,84 @@ type TxHashValue struct {
 	TxHash []byte `json:"tx_hash"`
 }
 
+func (k *TxHashKey) PackKey() []byte {
+	prefixLen := 1
+	// b'>L'
+	n := prefixLen + 4
+	key := make([]byte, n)
+	copy(key, k.Prefix)
+	binary.BigEndian.PutUint32(key[prefixLen:], k.TxNum)
+
+	return key
+}
+
+func (v *TxHashValue) PackValue() []byte {
+	n := len(v.TxHash)
+	value := make([]byte, n)
+	copy(value, v.TxHash[:n])
+
+	return value
+}
+
+func TxHashKeyPackPartialKey(key *TxHashKey) func(int) []byte {
+	return func(nFields int) []byte {
+		return TxHashKeyPackPartial(key, nFields)
+	}
+}
+
+func TxHashKeyPackPartialNFields(nFields int) func(*TxHashKey) []byte {
+	return func(u *TxHashKey) []byte {
+		return TxHashKeyPackPartial(u, nFields)
+	}
+}
+
+func TxHashKeyPackPartial(k *TxHashKey, nFields int) []byte {
+	// Limit nFields between 0 and number of fields, we always at least need
+	// the prefix, and we never need to iterate past the number of fields.
+	if nFields > 1 {
+		nFields = 1
+	}
+	if nFields < 0 {
+		nFields = 0
+	}
+
+	prefixLen := 1
+	var n = prefixLen
+	for i := 0; i <= nFields; i++ {
+		switch i {
+		case 1:
+			n += 4
+		}
+	}
+
+	key := make([]byte, n)
+
+	for i := 0; i <= nFields; i++ {
+		switch i {
+		case 0:
+			copy(key, k.Prefix)
+		case 1:
+			binary.BigEndian.PutUint32(key[prefixLen:], k.TxNum)
+		}
+	}
+
+	return key
+}
+
+func TxHashKeyUnpack(key []byte) *TxHashKey {
+	prefixLen := 1
+	return &TxHashKey{
+		Prefix: key[:prefixLen],
+		TxNum:  binary.BigEndian.Uint32(key[prefixLen:]),
+	}
+}
+
+func TxHashValueUnpack(value []byte) *TxHashValue {
+	return &TxHashValue{
+		TxHash: value,
+	}
+}
+
 /*
 class TxNumKey(NamedTuple):
     tx_hash: bytes
|
||||||
}
|
}
|
||||||
|
|
||||||
type TxNumValue struct {
|
type TxNumValue struct {
|
||||||
TxNum int32 `json:"tx_num"`
|
TxNum uint32 `json:"tx_num"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (k *TxNumKey) PackKey() []byte {
|
||||||
|
prefixLen := 1
|
||||||
|
// b'>L'
|
||||||
|
n := prefixLen + 32
|
||||||
|
key := make([]byte, n)
|
||||||
|
copy(key, k.Prefix)
|
||||||
|
copy(key[prefixLen:], k.TxHash[:32])
|
||||||
|
|
||||||
|
return key
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *TxNumValue) PackValue() []byte {
|
||||||
|
value := make([]byte, 4)
|
||||||
|
binary.BigEndian.PutUint32(value, v.TxNum)
|
||||||
|
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
|
func TxNumKeyPackPartialKey(key *TxNumKey) func(int) []byte {
|
||||||
|
return func(nFields int) []byte {
|
||||||
|
return TxNumKeyPackPartial(key, nFields)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TxNumKeyPackPartialNFields(nFields int) func(*TxNumKey) []byte {
|
||||||
|
return func(u *TxNumKey) []byte {
|
||||||
|
return TxNumKeyPackPartial(u, nFields)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TxNumKeyPackPartial(k *TxNumKey, nFields int) []byte {
|
||||||
|
// Limit nFields between 0 and number of fields, we always at least need
|
||||||
|
// the prefix, and we never need to iterate past the number of fields.
|
||||||
|
if nFields > 1 {
|
||||||
|
nFields = 1
|
||||||
|
}
|
||||||
|
if nFields < 0 {
|
||||||
|
nFields = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
prefixLen := 1
|
||||||
|
var n = prefixLen
|
||||||
|
for i := 0; i <= nFields; i++ {
|
||||||
|
switch i {
|
||||||
|
case 1:
|
||||||
|
n += 32
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
key := make([]byte, n)
|
||||||
|
|
||||||
|
for i := 0; i <= nFields; i++ {
|
||||||
|
switch i {
|
||||||
|
case 0:
|
||||||
|
copy(key, k.Prefix)
|
||||||
|
case 1:
|
||||||
|
copy(key[prefixLen:], k.TxHash[:32])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return key
|
||||||
|
}
|
||||||
|
|
||||||
|
func TxNumKeyUnpack(key []byte) *TxNumKey {
|
||||||
|
prefixLen := 1
|
||||||
|
return &TxNumKey{
|
||||||
|
Prefix: key[:prefixLen],
|
||||||
|
TxHash: key[prefixLen : prefixLen+32],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TxNumValueUnpack(value []byte) *TxNumValue {
|
||||||
|
return &TxNumValue{
|
||||||
|
TxNum: binary.BigEndian.Uint32(value),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
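TxHash (tx_num to tx_hash) and TxNum (tx_hash to tx_num) above are the two directions of the same index. A hedged sketch, assumed to live alongside the code above, with a made-up 32-byte hash; the prefix bytes 0x58 and 0x4e match the keys in the fixture CSVs added below:

func exampleTxIndexPair(txNum uint32, txHash []byte) ([]byte, []byte) {
    forward := &TxHashKey{Prefix: []byte{0x58}, TxNum: txNum}  // key: prefix + tx_num
    reverse := &TxNumKey{Prefix: []byte{0x4e}, TxHash: txHash} // key: prefix + tx_hash
    fwdKey := forward.PackKey()                                // 5 bytes
    revKey := reverse.PackKey()                                // 33 bytes
    // The value stored under fwdKey would be txHash; under revKey, txNum.
    return fwdKey, revKey
}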
@@ -1464,7 +1919,84 @@ type ChannelCountKey struct {
 }
 
 type ChannelCountValue struct {
-	Count int32 `json:"count"`
+	Count uint32 `json:"count"`
 }
 
+func (k *ChannelCountKey) PackKey() []byte {
+	prefixLen := 1
+	// b'>20sLH'
+	n := prefixLen + 20
+	key := make([]byte, n)
+	copy(key, k.Prefix)
+	copy(key[prefixLen:], k.ChannelHash[:20])
+
+	return key
+}
+
+func (v *ChannelCountValue) PackValue() []byte {
+	value := make([]byte, 4)
+	binary.BigEndian.PutUint32(value, v.Count)
+
+	return value
+}
+
+func ChannelCountKeyPackPartialKey(key *ChannelCountKey) func(int) []byte {
+	return func(nFields int) []byte {
+		return ChannelCountKeyPackPartial(key, nFields)
+	}
+}
+
+func ChannelCountKeyPackPartialNFields(nFields int) func(*ChannelCountKey) []byte {
+	return func(u *ChannelCountKey) []byte {
+		return ChannelCountKeyPackPartial(u, nFields)
+	}
+}
+
+func ChannelCountKeyPackPartial(k *ChannelCountKey, nFields int) []byte {
+	// Limit nFields between 0 and number of fields, we always at least need
+	// the prefix, and we never need to iterate past the number of fields.
+	if nFields > 1 {
+		nFields = 1
+	}
+	if nFields < 0 {
+		nFields = 0
+	}
+
+	prefixLen := 1
+	var n = prefixLen
+	for i := 0; i <= nFields; i++ {
+		switch i {
+		case 1:
+			n += 20
+		}
+	}
+
+	key := make([]byte, n)
+
+	for i := 0; i <= nFields; i++ {
+		switch i {
+		case 0:
+			copy(key, k.Prefix)
+		case 1:
+			copy(key[prefixLen:], k.ChannelHash)
+		}
+	}
+
+	return key
+}
+
+func ChannelCountKeyUnpack(key []byte) *ChannelCountKey {
+	prefixLen := 1
+	return &ChannelCountKey{
+		Prefix:      key[:prefixLen],
+		ChannelHash: key[prefixLen : prefixLen+20],
+	}
+}
+
+func ChannelCountValueUnpack(value []byte) *ChannelCountValue {
+	return &ChannelCountValue{
+		Count: binary.BigEndian.Uint32(value),
+	}
+}
+
 /*
@@ -1485,7 +2017,84 @@ type SupportAmountKey struct {
 }
 
 type SupportAmountValue struct {
-	Amount int32 `json:"amount"`
+	Amount uint64 `json:"amount"`
 }
 
+func (k *SupportAmountKey) PackKey() []byte {
+	prefixLen := 1
+	// b'>20sLH'
+	n := prefixLen + 20
+	key := make([]byte, n)
+	copy(key, k.Prefix)
+	copy(key[prefixLen:], k.ClaimHash[:20])
+
+	return key
+}
+
+func (v *SupportAmountValue) PackValue() []byte {
+	value := make([]byte, 8)
+	binary.BigEndian.PutUint64(value, v.Amount)
+
+	return value
+}
+
+func SupportAmountKeyPackPartialKey(key *SupportAmountKey) func(int) []byte {
+	return func(nFields int) []byte {
+		return SupportAmountKeyPackPartial(key, nFields)
+	}
+}
+
+func SupportAmountKeyPackPartialNFields(nFields int) func(*SupportAmountKey) []byte {
+	return func(u *SupportAmountKey) []byte {
+		return SupportAmountKeyPackPartial(u, nFields)
+	}
+}
+
+func SupportAmountKeyPackPartial(k *SupportAmountKey, nFields int) []byte {
+	// Limit nFields between 0 and number of fields, we always at least need
+	// the prefix, and we never need to iterate past the number of fields.
+	if nFields > 1 {
+		nFields = 1
+	}
+	if nFields < 0 {
+		nFields = 0
+	}
+
+	prefixLen := 1
+	var n = prefixLen
+	for i := 0; i <= nFields; i++ {
+		switch i {
+		case 1:
+			n += 20
+		}
+	}
+
+	key := make([]byte, n)
+
+	for i := 0; i <= nFields; i++ {
+		switch i {
+		case 0:
+			copy(key, k.Prefix)
+		case 1:
+			copy(key[prefixLen:], k.ClaimHash)
+		}
+	}
+
+	return key
+}
+
+func SupportAmountKeyUnpack(key []byte) *SupportAmountKey {
+	prefixLen := 1
+	return &SupportAmountKey{
+		Prefix:    key[:prefixLen],
+		ClaimHash: key[prefixLen : prefixLen+20],
+	}
+}
+
+func SupportAmountValueUnpack(value []byte) *SupportAmountValue {
+	return &SupportAmountValue{
+		Amount: binary.BigEndian.Uint64(value),
+	}
+}
+
 /*
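Unlike the tx tables, ChannelCount and SupportAmount key on a 20-byte claim/channel hash, and SupportAmount stores its amount as 8 big-endian bytes. A hedged sketch, assumed to live alongside the code above with `bytes` and `log` imported; the claim hash is invented and 0x61 is the prefix byte seen in resources/support_amount.csv:

func exampleSupportAmount() {
    claimHash := bytes.Repeat([]byte{0xab}, 20) // fake 20-byte claim hash
    k := &SupportAmountKey{Prefix: []byte{0x61}, ClaimHash: claimHash}
    v := &SupportAmountValue{Amount: 20000000} // 0x0000000001312d00, the first fixture row's value
    key := k.PackKey()   // 21 bytes: prefix + claim hash
    val := v.PackValue() // 8 bytes, big-endian uint64
    log.Printf("%x %x\n", key, val)
}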
@@ -2401,7 +3010,7 @@ func (k *EffectiveAmountKey) PackKey() []byte {
 
 	binary.BigEndian.PutUint16(key[prefixLen:], uint16(nameLen))
 	copy(key[prefixLen+2:], []byte(k.NormalizedName))
-	binary.BigEndian.PutUint64(key[prefixLen+nameLenLen:], OnesCompTwiddle-k.EffectiveAmount)
+	binary.BigEndian.PutUint64(key[prefixLen+nameLenLen:], OnesCompTwiddle64-k.EffectiveAmount)
 	binary.BigEndian.PutUint32(key[prefixLen+nameLenLen+8:], k.TxNum)
 	binary.BigEndian.PutUint16(key[prefixLen+nameLenLen+8+4:], k.Position)
 
@@ -2465,7 +3074,7 @@ func EffectiveAmountKeyPackPartial(k *EffectiveAmountKey, nFields int) []byte {
 			binary.BigEndian.PutUint16(key[prefixLen:], uint16(nameLen))
 			copy(key[prefixLen+2:], []byte(k.NormalizedName))
 		case 2:
-			binary.BigEndian.PutUint64(key[prefixLen+nameLenLen:], OnesCompTwiddle-k.EffectiveAmount)
+			binary.BigEndian.PutUint64(key[prefixLen+nameLenLen:], OnesCompTwiddle64-k.EffectiveAmount)
 		case 3:
 			binary.BigEndian.PutUint32(key[prefixLen+nameLenLen+8:], k.TxNum)
 		case 4:
@@ -2482,7 +3091,7 @@ func EffectiveAmountKeyUnpack(key []byte) *EffectiveAmountKey {
 	return &EffectiveAmountKey{
 		Prefix:          key[:prefixLen],
 		NormalizedName:  string(key[prefixLen+2 : prefixLen+2+int(nameLen)]),
-		EffectiveAmount: OnesCompTwiddle - binary.BigEndian.Uint64(key[prefixLen+2+int(nameLen):]),
+		EffectiveAmount: OnesCompTwiddle64 - binary.BigEndian.Uint64(key[prefixLen+2+int(nameLen):]),
 		TxNum:           binary.BigEndian.Uint32(key[prefixLen+2+int(nameLen)+8:]),
 		Position:        binary.BigEndian.Uint16(key[prefixLen+2+int(nameLen)+8+4:]),
 	}
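The rename to OnesCompTwiddle64 makes explicit that effective amounts are stored twiddled so that higher amounts sort first within a name. The encode/decode is symmetric, as a quick hedged sketch shows (assumed alongside the code above; the amount is made up):

func exampleEffectiveAmountTwiddle() {
    amount := uint64(150000000)             // arbitrary amount
    stored := OnesCompTwiddle64 - amount    // what PackKey writes, big-endian
    recovered := OnesCompTwiddle64 - stored // what EffectiveAmountKeyUnpack computes
    log.Println(recovered == amount)        // true; larger amounts give byte-wise smaller keys
}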
@@ -3220,10 +3829,36 @@ func generic(voidstar interface{}, firstByte byte, function byte, functionName s
 	case Header | 4<<8:
 		return Header, BlockHeaderKeyPackPartialKey(voidstar.(*BlockHeaderKey)), nil
 	case TxNum:
-		return 0x0, nil, errors.Base("%s function for %v not implemented", functionName, firstByte)
+		return TxNum, TxNumKeyUnpack(data), nil
+	case TxNum | 1<<8:
+		return TxNum, TxNumValueUnpack(data), nil
+	case TxNum | 2<<8:
+		return TxNum, voidstar.(*TxNumKey).PackKey(), nil
+	case TxNum | 3<<8:
+		return TxNum, voidstar.(*TxNumValue).PackValue(), nil
+	case TxNum | 4<<8:
+		return TxNum, TxNumKeyPackPartialKey(voidstar.(*TxNumKey)), nil
+
 	case TxCount:
+		return TxCount, TxCountKeyUnpack(data), nil
+	case TxCount | 1<<8:
+		return TxCount, TxCountValueUnpack(data), nil
+	case TxCount | 2<<8:
+		return TxCount, voidstar.(*TxCountKey).PackKey(), nil
+	case TxCount | 3<<8:
+		return TxCount, voidstar.(*TxCountValue).PackValue(), nil
+	case TxCount | 4<<8:
+		return TxCount, TxCountKeyPackPartialKey(voidstar.(*TxCountKey)), nil
 	case TxHash:
-		return 0x0, nil, errors.Base("%s function for %v not implemented", functionName, firstByte)
+		return TxHash, TxHashKeyUnpack(data), nil
+	case TxHash | 1<<8:
+		return TxHash, TxHashValueUnpack(data), nil
+	case TxHash | 2<<8:
+		return TxHash, voidstar.(*TxHashKey).PackKey(), nil
+	case TxHash | 3<<8:
+		return TxHash, voidstar.(*TxHashValue).PackValue(), nil
+	case TxHash | 4<<8:
+		return TxHash, TxHashKeyPackPartialKey(voidstar.(*TxHashKey)), nil
 	case UTXO:
 		return UTXO, UTXOKeyUnpack(data), nil
 	case UTXO | 1<<8:
@@ -3255,9 +3890,46 @@ func generic(voidstar interface{}, firstByte byte, function byte, functionName s
 	case HashXHistory | 4<<8:
 		return HashXHistory, HashXHistoryKeyPackPartialKey(voidstar.(*HashXHistoryKey)), nil
 	case DBState:
+		return DBState, DBStateKeyUnpack(data), nil
+	case DBState | 1<<8:
+		return DBState, DBStateValueUnpack(data), nil
+	case DBState | 2<<8:
+		return DBState, voidstar.(*DBStateKey).PackKey(), nil
+	case DBState | 3<<8:
+		return DBState, voidstar.(*DBStateValue).PackValue(), nil
+	case DBState | 4<<8:
+		return DBState, DBStateKeyPackPartialKey(voidstar.(*DBStateKey)), nil
+
 	case ChannelCount:
+		return ChannelCount, ChannelCountKeyUnpack(data), nil
+	case ChannelCount | 1<<8:
+		return ChannelCount, ChannelCountValueUnpack(data), nil
+	case ChannelCount | 2<<8:
+		return ChannelCount, voidstar.(*ChannelCountKey).PackKey(), nil
+	case ChannelCount | 3<<8:
+		return ChannelCount, voidstar.(*ChannelCountValue).PackValue(), nil
+	case ChannelCount | 4<<8:
+		return ChannelCount, ChannelCountKeyPackPartialKey(voidstar.(*ChannelCountKey)), nil
 	case SupportAmount:
+		return SupportAmount, SupportAmountKeyUnpack(data), nil
+	case SupportAmount | 1<<8:
+		return SupportAmount, SupportAmountValueUnpack(data), nil
+	case SupportAmount | 2<<8:
+		return SupportAmount, voidstar.(*SupportAmountKey).PackKey(), nil
+	case SupportAmount | 3<<8:
+		return SupportAmount, voidstar.(*SupportAmountValue).PackValue(), nil
+	case SupportAmount | 4<<8:
+		return SupportAmount, SupportAmountKeyPackPartialKey(voidstar.(*SupportAmountKey)), nil
 	case BlockTXs:
+		return BlockTXs, BlockTxsKeyUnpack(data), nil
+	case BlockTXs | 1<<8:
+		return BlockTXs, BlockTxsValueUnpack(data), nil
+	case BlockTXs | 2<<8:
+		return BlockTXs, voidstar.(*BlockTxsKey).PackKey(), nil
+	case BlockTXs | 3<<8:
+		return BlockTXs, voidstar.(*BlockTxsValue).PackValue(), nil
+	case BlockTXs | 4<<8:
+		return BlockTXs, BlockTxsKeyPackPartialKey(voidstar.(*BlockTxsKey)), nil
 	}
 
 	return 0x0, nil, errors.Base("%s function for %v not implemented", functionName, firstByte)
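Reading the cases above, generic() dispatches on the table prefix byte combined with a function selector shifted into the next byte: 0 unpacks a key, 1 unpacks a value, 2 packs a key, 3 packs a value, and 4 returns a partial-key packer. A hedged sketch of how one dispatch value is formed; it reproduces the case arithmetic only, not the actual switch expression, and 0x54 is the TxCount prefix seen in resources/tx_count.csv:

func exampleDispatchValue() int {
    const txCountPrefix = 0x54 // prefix byte used in resources/tx_count.csv
    function := 1              // 1 = unpack value, per the cases above
    return int(txCountPrefix) | function<<8 // matches "case TxCount | 1<<8"
}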
@@ -106,11 +106,12 @@ func testGeneric(filePath string, prefix byte, numPartials int) func(*testing.T)
 	if err != nil {
 		log.Println(err)
 	}
-	var numRecords = 9
-	if prefix == prefixes.Undo {
-		numRecords = 1
-	}
-	stop, err := hex.DecodeString(records[numRecords][0])
+	numRecords := i
+	// var numRecords = 9
+	// if prefix == prefixes.Undo || prefix == prefixes.DBState {
+	// 	numRecords = 1
+	// }
+	stop, err := hex.DecodeString(records[numRecords-1][0])
 	if err != nil {
 		log.Println(err)
 	}
@@ -134,6 +135,34 @@ func testGeneric(filePath string, prefix byte, numPartials int) func(*testing.T)
 	}
 }
 
+func TestSupportAmount(t *testing.T) {
+	testGeneric("../../resources/support_amount.csv", prefixes.SupportAmount, 1)(t)
+}
+
+func TestChannelCount(t *testing.T) {
+	testGeneric("../../resources/channel_count.csv", prefixes.ChannelCount, 1)(t)
+}
+
+func TestDBState(t *testing.T) {
+	testGeneric("../../resources/db_state.csv", prefixes.DBState, 0)(t)
+}
+
+func TestBlockTxs(t *testing.T) {
+	testGeneric("../../resources/block_txs.csv", prefixes.BlockTXs, 1)(t)
+}
+
+func TestTxCount(t *testing.T) {
+	testGeneric("../../resources/tx_count.csv", prefixes.TxCount, 1)(t)
+}
+
+func TestTxHash(t *testing.T) {
+	testGeneric("../../resources/tx_hash.csv", prefixes.TxHash, 1)(t)
+}
+
+func TestTxNum(t *testing.T) {
+	testGeneric("../../resources/tx_num.csv", prefixes.TxNum, 1)(t)
+}
+
 func TestTx(t *testing.T) {
 	testGeneric("../../resources/tx.csv", prefixes.Tx, 1)(t)
 }
@@ -244,79 +273,8 @@ func TestUTXO(t *testing.T) {
 }
 
 func TestHashXUTXO(t *testing.T) {
-	tests := []struct {
-		name     string
-		filePath string
-	}{
-		{
-			name:     "Read HashX_UTXO correctly",
-			filePath: "../../resources/hashx_utxo.csv",
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			log.Println(tt.filePath)
-			file, err := os.Open(tt.filePath)
-			if err != nil {
-				log.Println(err)
-			}
-			reader := csv.NewReader(file)
-			records, err := reader.ReadAll()
-			if err != nil {
-				log.Println(err)
-			}
-
-			wOpts := grocksdb.NewDefaultWriteOptions()
-			opts := grocksdb.NewDefaultOptions()
-			opts.SetCreateIfMissing(true)
-			db, err := grocksdb.OpenDb(opts, "tmp")
-			if err != nil {
-				log.Println(err)
-			}
-			defer func() {
-				db.Close()
-				err = os.RemoveAll("./tmp")
-				if err != nil {
-					log.Println(err)
-				}
-			}()
-			for _, record := range records {
-				key, err := hex.DecodeString(record[0])
-				if err != nil {
-					log.Println(err)
-				}
-				val, err := hex.DecodeString(record[1])
-				if err != nil {
-					log.Println(err)
-				}
-				db.Put(wOpts, key, val)
-			}
-			start, err := hex.DecodeString(records[0][0])
-			if err != nil {
-				log.Println(err)
-			}
-			options := dbpkg.NewIterateOptions().WithPrefix([]byte{prefixes.HashXUTXO}).WithStart(start).WithIncludeValue(true)
-			ch := dbpkg.Iter(db, options)
-			var i = 0
-			for kv := range ch {
-				log.Println(kv.Key)
-				got := kv.Value.(*prefixes.HashXUTXOValue).PackValue()
-				want, err := hex.DecodeString(records[i][1])
-				if err != nil {
-					log.Println(err)
-				}
-				if !bytes.Equal(got, want) {
-					t.Errorf("got: %+v, want: %+v\n", got, want)
-				}
-				i++
-				if i > 9 {
-					return
-				}
-			}
-		})
-	}
+	filePath := "../../resources/hashx_utxo.csv"
+	testGeneric(filePath, prefixes.HashXUTXO, 3)(t)
 }
 
 func TestUTXOKey_String(t *testing.T) {
4 main.go
@@ -38,7 +38,7 @@ func main() {
 
 	options := &db.IterOptions{
 		FillCache:    false,
-		Prefix:       []byte{prefixes.Tx},
+		Prefix:       []byte{prefixes.SupportAmount},
 		Start:        nil,
 		Stop:         nil,
 		IncludeStart: true,
@@ -49,7 +49,7 @@ func main() {
 		RawValue: true,
 	}
 
-	db.ReadWriteRawN(dbVal, options, "./resources/tx.csv", 10)
+	db.ReadWriteRawN(dbVal, options, "./resources/support_amount.csv", 10)
 
 	return
 }
10 resources/block_txs.csv Normal file
@@ -0,0 +1,10 @@
+6200000001,ba888e2f9c037f831046f8ad09f6d378f79c728d003b177a64d29621f481da5d
+6200000002,09d8734d81b5f2eb1b653caf17491544ddfbc72f2f4c0c3f22a3362db5ba9d47
+6200000003,e285dbf24334585b9a924536a717160ee185a86d1eeb7b19684538685eca761a
+6200000004,d83cf1408debbd631950b7a95b0c940772119cd8a615a3d44601568713fec80c
+6200000005,47638e54178dbdddf2e81a3b7566860e5264df6066755f9760a893f5caecc579
+6200000006,ec91627e0dba856b933983425d7f72958e8f974682632a0fa2acee9cfd819401
+6200000007,a3c4a19948a1263722c45c5601fd10a7aea7cf73bfa45e060508f109155e80ab
+6200000008,0fc2da46cf0de0057c1b9fc93d997105ff6cf2c8c43269b446c1dbf5ac18be8c
+6200000009,7356a733f87e592ea133328792dd9d676ed83771c8ff0f519928ce752f159ba6
+620000000a,54a598c4356ce620a604004929af14f4c03c42eba017288a4a1d186aedfdd8f4
10 resources/channel_count.csv Normal file
@@ -0,0 +1,10 @@
+5a00009d4f7ab04598c25390fe543aba137c149233,00000000
+5a00009f35397ada0476b04c67978ad081b50833ed,00000001
+5a0000cbef248847373c999de142bc2d7da4d01410,00000001
+5a0000eff19123a0b3087a0f059a5e0c10d7437756,00000017
+5a0000f3ebc97800e26f68f0c45dfd05bf54489190,00000006
+5a000191553161252e30656c931e0dffb98d8f97f8,00000001
+5a000201be81dc3e91efb767f1e61c5fb7e91893d7,00000008
+5a00023c709799e614bf512a6b97297aee15272bf2,00000007
+5a000278a3339c1dbcd26b825f0ec7b8420b0e7fdb,00000001
+5a000286086a1430ee22305cec2540bd69a61be3d1,0000000c
1 resources/db_state.csv Normal file
@@ -0,0 +1 @@
+73,9c89283ba0f3227f6c03b70216b9f665f0118d5e0fa729cedf4fb34d6a34f46300102d0a03cf8317b51915dc662e9786b9d4c0c7443652ea58ea91124e2c62dfd3ab23dbd7c9446900102d0d001168ae000700102d0dffffffffffffffff00102d0a
10 resources/support_amount.csv Normal file
@@ -0,0 +1,10 @@
+6100000324e40fcb63a0b517a3660645e9bd99244a,0000000001312d00
+6100000e474ea919000015b80ccb7f4e6cc73e2f52,0000000000000000
+61000023415fc7ba8a470f0cdf4a66bffacd5ba979,000000005c18bd6f
+610000298e7db49c1f582e316bb3706fc3c71193cf,0000000001f0f430
+6100002c5bca153faaf3c644304f8a259340064f6c,0000000000000000
+6100002e6db2ae2c415a34d2d36d3cf61ac7133196,000000000bebc200
+6100002ea3970f0f658f50dbdb27abbb716ed01c80,0000000000000000
+610000345ff10a01448c42bf1a89a4399e8b82c1aa,0000000001ceef65
+610000437bd840e2463d3dfc8c80e66e2585dd02b3,00000000000eb7a7
+61000063d2be270f861565ffd8f07a118ea9dff179,000000000bebc200
10 resources/tx_count.csv Normal file
@@ -0,0 +1,10 @@
+5400000000,00000001
+5400000001,00000002
+5400000002,00000003
+5400000003,00000004
+5400000004,00000005
+5400000005,00000006
+5400000006,00000007
+5400000007,00000008
+5400000008,00000009
+5400000009,0000000a
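Each fixture row is a hex-encoded key/value pair: the first key byte is the table prefix (0x54 here) followed by the big-endian height, and the value is the big-endian tx count. A hedged sketch of decoding the first row with the new unpack functions; it assumes the prefixes package is importable and that `encoding/hex` and `log` are imported, with error handling omitted:

func exampleDecodeFixtureRow() {
    key, _ := hex.DecodeString("5400000000")
    val, _ := hex.DecodeString("00000001")
    k := prefixes.TxCountKeyUnpack(key)   // Prefix 0x54, Height 0
    v := prefixes.TxCountValueUnpack(val) // TxCount 1
    log.Println(k.Height, v.TxCount)      // prints: 0 1
}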
10 resources/tx_hash.csv Normal file
@@ -0,0 +1,10 @@
+5800000000,cc59e59ff97ac092b55e423aa5495151ed6fb80570a5bb78cd5bd1c3821c21b8
+5800000001,ba888e2f9c037f831046f8ad09f6d378f79c728d003b177a64d29621f481da5d
+5800000002,09d8734d81b5f2eb1b653caf17491544ddfbc72f2f4c0c3f22a3362db5ba9d47
+5800000003,e285dbf24334585b9a924536a717160ee185a86d1eeb7b19684538685eca761a
+5800000004,d83cf1408debbd631950b7a95b0c940772119cd8a615a3d44601568713fec80c
+5800000005,47638e54178dbdddf2e81a3b7566860e5264df6066755f9760a893f5caecc579
+5800000006,ec91627e0dba856b933983425d7f72958e8f974682632a0fa2acee9cfd819401
+5800000007,a3c4a19948a1263722c45c5601fd10a7aea7cf73bfa45e060508f109155e80ab
+5800000008,0fc2da46cf0de0057c1b9fc93d997105ff6cf2c8c43269b446c1dbf5ac18be8c
+5800000009,7356a733f87e592ea133328792dd9d676ed83771c8ff0f519928ce752f159ba6
10 resources/tx_num.csv Normal file
@@ -0,0 +1,10 @@
+4e00000031a2e262d60074f07330d7187907e5b02be8f9b3c60cdc03d776314912,01376ce8
+4e0000004e91edda0f9cd3bcef9565a31e6bbbd34c731483e03ec7d8819158ac30,030ee002
+4e0000008070865693cd82ed0f59896e34973adbff0583fb8a1293919591446075,03518017
+4e0000009c24d4d9187749a1f8f6d6c0f92a5e98817f5efcd427a5593344a45a2e,019436d7
+4e000000cee66b136a85596df24ece60bbd1392f70204fd2f144f059e5195ee3c9,00169e07
+4e000000e0bf96accd4eda4d871c5bc8c0ebb14509b896a867095f9b419f9b04f2,02bcc37a
+4e0000011daab2a9f45cca3d0bac6fee69324af4757b1c4e8c3a362efbb6e8a09d,00c4c1e3
+4e00000150116856ce8ce06604b9584e61afb3b613a8a5a512ab260e7f0cbe5496,003a51fa
+4e0000015ba8df5c3ed80d0fc364136de02e3ba9d9550cbb1ecef03b97fcdf0621,00d725d0
+4e000001d4a53fc92321415631862a791f8680241ed172e579534713f68a6869ba,025e8166