[lbry] claimtrie: import current snapshot
Sync to tip Co-authored-by: Brannon King <countprimes@gmail.com>
This commit is contained in:
parent
2dcdb458e8
commit
ccaa6dd816
42 changed files with 4689 additions and 0 deletions
83
claimtrie/block/blockrepo/pebble.go
Normal file
83
claimtrie/block/blockrepo/pebble.go
Normal file
|
@ -0,0 +1,83 @@
|
|||
package blockrepo
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
|
||||
"github.com/cockroachdb/pebble"
|
||||
)
|
||||
|
||||
// Pebble is a block repo backed by a Pebble key-value store.
// Keys are big-endian uint32 block heights; values are block hashes.
type Pebble struct {
	db *pebble.DB
}
|
||||
|
||||
func NewPebble(path string) (*Pebble, error) {
|
||||
|
||||
db, err := pebble.Open(path, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pebble open %s, %w", path, err)
|
||||
}
|
||||
|
||||
repo := &Pebble{db: db}
|
||||
|
||||
return repo, nil
|
||||
}
|
||||
|
||||
func (repo *Pebble) Load() (int32, error) {
|
||||
|
||||
iter := repo.db.NewIter(nil)
|
||||
if !iter.Last() {
|
||||
if err := iter.Close(); err != nil {
|
||||
return 0, fmt.Errorf("close iter: %w", err)
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
height := int32(binary.BigEndian.Uint32(iter.Key()))
|
||||
if err := iter.Close(); err != nil {
|
||||
return height, fmt.Errorf("close iter: %w", err)
|
||||
}
|
||||
|
||||
return height, nil
|
||||
}
|
||||
|
||||
func (repo *Pebble) Get(height int32) (*chainhash.Hash, error) {
|
||||
|
||||
key := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(key, uint32(height))
|
||||
|
||||
b, closer, err := repo.db.Get(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer closer.Close()
|
||||
|
||||
hash, err := chainhash.NewHash(b)
|
||||
|
||||
return hash, err
|
||||
}
|
||||
|
||||
func (repo *Pebble) Set(height int32, hash *chainhash.Hash) error {
|
||||
|
||||
key := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(key, uint32(height))
|
||||
|
||||
return repo.db.Set(key, hash[:], pebble.NoSync)
|
||||
}
|
||||
|
||||
func (repo *Pebble) Close() error {
|
||||
|
||||
err := repo.db.Flush()
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble fludh: %w", err)
|
||||
}
|
||||
|
||||
err = repo.db.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble close: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
13
claimtrie/block/repo.go
Normal file
13
claimtrie/block/repo.go
Normal file
|
@ -0,0 +1,13 @@
|
|||
package block
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
)
|
||||
|
||||
// Repo defines APIs for Block to access persistence layer.
type Repo interface {
	// Load returns the highest recorded block height (0 when empty).
	Load() (int32, error)
	// Set records the hash for a block height.
	Set(height int32, hash *chainhash.Hash) error
	// Get returns the hash recorded for a block height.
	Get(height int32) (*chainhash.Hash, error)
	// Close releases the underlying storage.
	Close() error
}
|
91
claimtrie/chain/chainrepo/pebble.go
Normal file
91
claimtrie/chain/chainrepo/pebble.go
Normal file
|
@ -0,0 +1,91 @@
|
|||
package chainrepo
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/btcsuite/btcd/claimtrie/change"
|
||||
"github.com/vmihailenco/msgpack/v5"
|
||||
|
||||
"github.com/cockroachdb/pebble"
|
||||
)
|
||||
|
||||
// Pebble is a chain (change-list) repo backed by a Pebble key-value
// store. Keys are big-endian block heights; values are msgpack-encoded
// []change.Change.
type Pebble struct {
	db *pebble.DB
}
|
||||
|
||||
func NewPebble(path string) (*Pebble, error) {
|
||||
|
||||
db, err := pebble.Open(path, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pebble open %s, %w", path, err)
|
||||
}
|
||||
|
||||
repo := &Pebble{db: db}
|
||||
|
||||
return repo, nil
|
||||
}
|
||||
|
||||
func (repo *Pebble) Save(height int32, changes []change.Change) error {
|
||||
|
||||
if len(changes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
key := bytes.NewBuffer(nil)
|
||||
err := binary.Write(key, binary.BigEndian, height)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble prepare key: %w", err)
|
||||
}
|
||||
|
||||
value, err := msgpack.Marshal(changes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble msgpack marshal: %w", err)
|
||||
}
|
||||
|
||||
err = repo.db.Set(key.Bytes(), value, pebble.NoSync)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble set: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (repo *Pebble) Load(height int32) ([]change.Change, error) {
|
||||
|
||||
key := bytes.NewBuffer(nil)
|
||||
err := binary.Write(key, binary.BigEndian, height)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pebble prepare key: %w", err)
|
||||
}
|
||||
|
||||
b, closer, err := repo.db.Get(key.Bytes())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer closer.Close()
|
||||
|
||||
var changes []change.Change
|
||||
err = msgpack.Unmarshal(b, &changes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pebble msgpack marshal: %w", err)
|
||||
}
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
func (repo *Pebble) Close() error {
|
||||
|
||||
err := repo.db.Flush()
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble fludh: %w", err)
|
||||
}
|
||||
|
||||
err = repo.db.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble close: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
9
claimtrie/chain/repo.go
Normal file
9
claimtrie/chain/repo.go
Normal file
|
@ -0,0 +1,9 @@
|
|||
package chain
|
||||
|
||||
import "github.com/btcsuite/btcd/claimtrie/change"
|
||||
|
||||
// Repo defines APIs for Chain to access the persistence layer of
// per-height change lists.
type Repo interface {
	// Save persists the changes that took effect at height.
	Save(height int32, changes []change.Change) error
	// Load returns the changes recorded at height.
	Load(height int32) ([]change.Change, error)
	// Close releases the underlying storage.
	Close() error
}
|
59
claimtrie/change/change.go
Normal file
59
claimtrie/change/change.go
Normal file
|
@ -0,0 +1,59 @@
|
|||
package change
|
||||
|
||||
// ChangeType enumerates the kinds of claimtrie mutations that can be
// recorded and replayed.
type ChangeType int

const (
	AddClaim ChangeType = iota
	SpendClaim
	UpdateClaim
	AddSupport
	SpendSupport
)

// Change records a single mutation to a name in the claimtrie, tagged
// with the block height at which it takes effect.
type Change struct {
	Type   ChangeType
	Height int32

	Name     []byte
	ClaimID  string
	OutPoint string
	Amount   int64
	Value    []byte

	ActiveHeight int32 // for normalization fork
	VisibleHeight int32
}

// New returns a Change of the given type; remaining fields are filled
// in with the chainable Set* helpers below.
func New(typ ChangeType) Change {
	return Change{Type: typ}
}

// Each setter below mutates a copy and returns it, so calls chain:
// change.New(t).SetName(n).SetAmount(a).

// SetHeight sets the effective block height and returns the copy.
func (chg Change) SetHeight(height int32) Change {
	chg.Height = height
	return chg
}

// SetName sets the claim name and returns the copy.
func (chg Change) SetName(name []byte) Change {
	chg.Name = name
	return chg
}

// SetClaimID sets the claim ID and returns the copy.
func (chg Change) SetClaimID(claimID string) Change {
	chg.ClaimID = claimID
	return chg
}

// SetOutPoint sets the outpoint string and returns the copy.
func (chg Change) SetOutPoint(op string) Change {
	chg.OutPoint = op
	return chg
}

// SetAmount sets the stake amount and returns the copy.
func (chg Change) SetAmount(amt int64) Change {
	chg.Amount = amt
	return chg
}

// SetValue sets the claim value and returns the copy.
func (chg Change) SetValue(value []byte) Change {
	chg.Value = value
	return chg
}
|
372
claimtrie/claimtrie.go
Normal file
372
claimtrie/claimtrie.go
Normal file
|
@ -0,0 +1,372 @@
|
|||
package claimtrie
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sort"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/claimtrie/block"
|
||||
"github.com/btcsuite/btcd/claimtrie/block/blockrepo"
|
||||
"github.com/btcsuite/btcd/claimtrie/change"
|
||||
"github.com/btcsuite/btcd/claimtrie/config"
|
||||
"github.com/btcsuite/btcd/claimtrie/merkletrie"
|
||||
"github.com/btcsuite/btcd/claimtrie/merkletrie/merkletrierepo"
|
||||
"github.com/btcsuite/btcd/claimtrie/node"
|
||||
"github.com/btcsuite/btcd/claimtrie/node/noderepo"
|
||||
"github.com/btcsuite/btcd/claimtrie/param"
|
||||
"github.com/btcsuite/btcd/claimtrie/temporal"
|
||||
"github.com/btcsuite/btcd/claimtrie/temporal/temporalrepo"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
)
|
||||
|
||||
// ClaimTrie implements a Merkle Trie supporting linear history of commits.
type ClaimTrie struct {

	// Repository for reported block hashes (debugging purpose).
	reportedBlockRepo block.Repo

	// Repository for calculated block hashes.
	blockRepo block.Repo

	// Repository for storing temporal information of nodes at each block height.
	// For example, which nodes (by name) should be refreshed at each block height
	// due to stake expiration or delayed activation.
	temporalRepo temporal.Repo

	// Cache layer of Nodes.
	nodeManager node.Manager

	// Prefix tree (trie) that manages merkle hash of each node.
	merkleTrie *merkletrie.MerkleTrie

	// Current block height, which is increased by one when AppendBlock() is called.
	height int32

	// Write buffer for batching changes written to repo.
	// flushed before block is appended.
	// changes []change.Change

	// Registrered cleanup functions which are invoked in the Close() in reverse order.
	cleanups []func() error
}
|
||||
|
||||
// New opens all backing repositories under param.ClaimtrieDataFolder,
// restores the trie root and node state to the last persisted height,
// and returns a ready-to-use ClaimTrie. Cleanup functions are registered
// in open order and run in reverse order by Close().
//
// NOTE(review): on an error return, repos already appended to cleanups
// are not closed — confirm whether leaking them here is acceptable.
func New() (*ClaimTrie, error) {

	cfg := config.GenerateConfig(param.ClaimtrieDataFolder)
	var cleanups []func() error

	blockRepo, err := blockrepo.NewPebble(cfg.BlockRepoPebble.Path)
	if err != nil {
		return nil, fmt.Errorf("new block repo: %w", err)
	}
	cleanups = append(cleanups, blockRepo.Close)

	temporalRepo, err := temporalrepo.NewPebble(cfg.TemporalRepoPebble.Path)
	if err != nil {
		return nil, fmt.Errorf("new temporal repo: %w", err)
	}
	cleanups = append(cleanups, temporalRepo.Close)

	// Initialize repository for changes to nodes.
	// The cleanup is delegated to the Node Manager.
	nodeRepo, err := noderepo.NewPebble(cfg.NodeRepoPebble.Path)
	if err != nil {
		return nil, fmt.Errorf("new node repo: %w", err)
	}

	baseManager, err := node.NewBaseManager(nodeRepo)
	if err != nil {
		return nil, fmt.Errorf("new node manager: %w", err)
	}
	nodeManager := node.NewNormalizingManager(baseManager)
	cleanups = append(cleanups, nodeManager.Close)

	// Initialize repository for MerkleTrie.
	// The cleanup is delegated to MerkleTrie.
	trieRepo, err := merkletrierepo.NewPebble(cfg.MerkleTrieRepoPebble.Path)
	if err != nil {
		return nil, fmt.Errorf("new trie repo: %w", err)
	}

	reportedBlockRepo, err := blockrepo.NewPebble(cfg.ReportedBlockRepoPebble.Path)
	if err != nil {
		return nil, fmt.Errorf("new reported block repo: %w", err)
	}
	cleanups = append(cleanups, reportedBlockRepo.Close)

	trie := merkletrie.New(nodeManager, trieRepo)
	cleanups = append(cleanups, trie.Close)

	// Restore the last height.
	previousHeight, err := blockRepo.Load()
	if err != nil {
		return nil, fmt.Errorf("load blocks: %w", err)
	}

	if previousHeight > 0 {
		// Re-anchor the trie at the persisted root and replay node
		// manager state up to that height.
		hash, err := blockRepo.Get(previousHeight)
		if err != nil {
			return nil, fmt.Errorf("get hash: %w", err)
		}
		trie.SetRoot(hash)

		_, err = nodeManager.IncrementHeightTo(previousHeight)
		if err != nil {
			return nil, fmt.Errorf("node manager init: %w", err)
		}
	}

	ct := &ClaimTrie{
		blockRepo:    blockRepo,
		temporalRepo: temporalRepo,

		nodeManager: nodeManager,
		merkleTrie:  trie,

		height: previousHeight,

		reportedBlockRepo: reportedBlockRepo,

		cleanups: cleanups,
	}

	return ct, nil
}
|
||||
|
||||
// AddClaim adds a Claim to the ClaimTrie.
|
||||
func (ct *ClaimTrie) AddClaim(name []byte, op wire.OutPoint, id node.ClaimID, amt int64, val []byte) error {
|
||||
|
||||
chg := change.Change{
|
||||
Type: change.AddClaim,
|
||||
Name: name,
|
||||
OutPoint: op.String(),
|
||||
Amount: amt,
|
||||
ClaimID: id.String(),
|
||||
Value: val,
|
||||
}
|
||||
|
||||
return ct.forwardNodeChange(chg)
|
||||
}
|
||||
|
||||
// UpdateClaim updates a Claim in the ClaimTrie.
|
||||
func (ct *ClaimTrie) UpdateClaim(name []byte, op wire.OutPoint, amt int64, id node.ClaimID, val []byte) error {
|
||||
|
||||
chg := change.Change{
|
||||
Type: change.UpdateClaim,
|
||||
Name: name,
|
||||
OutPoint: op.String(),
|
||||
Amount: amt,
|
||||
ClaimID: id.String(),
|
||||
Value: val,
|
||||
}
|
||||
|
||||
return ct.forwardNodeChange(chg)
|
||||
}
|
||||
|
||||
// SpendClaim spends a Claim in the ClaimTrie.
|
||||
func (ct *ClaimTrie) SpendClaim(name []byte, op wire.OutPoint, id node.ClaimID) error {
|
||||
|
||||
chg := change.Change{
|
||||
Type: change.SpendClaim,
|
||||
Name: name,
|
||||
OutPoint: op.String(),
|
||||
ClaimID: id.String(),
|
||||
}
|
||||
|
||||
return ct.forwardNodeChange(chg)
|
||||
}
|
||||
|
||||
// AddSupport adds a Support to the ClaimTrie.
|
||||
func (ct *ClaimTrie) AddSupport(name []byte, value []byte, op wire.OutPoint, amt int64, id node.ClaimID) error {
|
||||
|
||||
chg := change.Change{
|
||||
Type: change.AddSupport,
|
||||
Name: name,
|
||||
OutPoint: op.String(),
|
||||
Amount: amt,
|
||||
ClaimID: id.String(),
|
||||
Value: value,
|
||||
}
|
||||
|
||||
return ct.forwardNodeChange(chg)
|
||||
}
|
||||
|
||||
// SpendSupport spends a Support in the ClaimTrie.
|
||||
func (ct *ClaimTrie) SpendSupport(name []byte, op wire.OutPoint, id node.ClaimID) error {
|
||||
|
||||
chg := change.Change{
|
||||
Type: change.SpendSupport,
|
||||
Name: name,
|
||||
OutPoint: op.String(),
|
||||
ClaimID: id.String(),
|
||||
}
|
||||
|
||||
return ct.forwardNodeChange(chg)
|
||||
}
|
||||
|
||||
// AppendBlock increases block by one.
|
||||
func (ct *ClaimTrie) AppendBlock() error {
|
||||
|
||||
ct.height++
|
||||
names, err := ct.nodeManager.IncrementHeightTo(ct.height)
|
||||
if err != nil {
|
||||
return fmt.Errorf("node mgr increment: %w", err)
|
||||
}
|
||||
|
||||
expirations, err := ct.temporalRepo.NodesAt(ct.height)
|
||||
if err != nil {
|
||||
return fmt.Errorf("temporal repo nodes at: %w", err)
|
||||
}
|
||||
|
||||
names = removeDuplicates(names) // comes out sorted
|
||||
|
||||
updateNames := make([][]byte, 0, len(names)+len(expirations))
|
||||
updateHeights := make([]int32, 0, len(names)+len(expirations))
|
||||
updateNames = append(updateNames, names...)
|
||||
for range names { // log to the db that we updated a name at this height for rollback purposes
|
||||
updateHeights = append(updateHeights, ct.height)
|
||||
}
|
||||
names = append(names, expirations...)
|
||||
names = removeDuplicates(names)
|
||||
|
||||
for _, name := range names {
|
||||
|
||||
ct.merkleTrie.Update(name, true)
|
||||
|
||||
newName, nextUpdate := ct.nodeManager.NextUpdateHeightOfNode(name)
|
||||
if nextUpdate <= 0 {
|
||||
continue // some names are no longer there; that's not an error
|
||||
}
|
||||
updateNames = append(updateNames, newName) // TODO: make sure using the temporalRepo batch is actually faster
|
||||
updateHeights = append(updateHeights, nextUpdate)
|
||||
}
|
||||
err = ct.temporalRepo.SetNodesAt(updateNames, updateHeights)
|
||||
if err != nil {
|
||||
return fmt.Errorf("temporal repo set at: %w", err)
|
||||
}
|
||||
|
||||
hitFork := ct.updateTrieForHashForkIfNecessary()
|
||||
|
||||
h := ct.MerkleHash()
|
||||
ct.blockRepo.Set(ct.height, h)
|
||||
|
||||
if hitFork {
|
||||
ct.merkleTrie.SetRoot(h) // for clearing the memory entirely
|
||||
runtime.GC()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateTrieForHashForkIfNecessary marks every trie node dirty when the
// current height is exactly the all-claims-in-merkle fork height, so the
// whole trie is rehashed under the post-fork scheme. Returns true only
// at the fork height.
func (ct *ClaimTrie) updateTrieForHashForkIfNecessary() bool {
	if ct.height != param.AllClaimsInMerkleForkHeight {
		return false
	}
	fmt.Printf("Marking all trie nodes as dirty for the hash fork...")
	// invalidate all names because we have to recompute the hash on everything
	// requires its own 8GB of RAM in current trie impl.
	ct.nodeManager.IterateNames(func(name []byte) bool {
		ct.merkleTrie.Update(name, false)
		return true
	})
	fmt.Printf(" Done. Now recomputing all hashes...\n")
	return true
}
|
||||
|
||||
// removeDuplicates sorts names in place (lexicographically by bytes) and
// drops adjacent duplicates, returning the deduplicated slice. The input
// slice's backing array is reused.
//
// The previous version deleted duplicates one at a time with
// append(names[:i], names[i+1:]...), which is O(n²) in the worst case;
// this single compaction pass is O(n) after the sort.
func removeDuplicates(names [][]byte) [][]byte {
	sort.Slice(names, func(i, j int) bool { // put names in order so we can skip duplicates
		return bytes.Compare(names[i], names[j]) < 0
	})

	out := names[:0]
	for _, name := range names {
		if len(out) == 0 || !bytes.Equal(out[len(out)-1], name) {
			out = append(out, name)
		}
	}
	return out
}
|
||||
|
||||
// ReportHash persists the Merkle Hash "learned and reported" by the block.
|
||||
// This is for debugging purpose.
|
||||
// So we can replay the trace of changes and compare calculated and learned hash.
|
||||
func (ct *ClaimTrie) ReportHash(height int32, hash chainhash.Hash) error {
|
||||
|
||||
if ct.reportedBlockRepo != nil {
|
||||
return ct.reportedBlockRepo.Set(height, &hash)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResetHeight resets the ClaimTrie to a previous known height..
|
||||
func (ct *ClaimTrie) ResetHeight(height int32) error {
|
||||
|
||||
names := make([][]byte, 0)
|
||||
for h := height + 1; h <= ct.height; h++ {
|
||||
results, err := ct.temporalRepo.NodesAt(h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
names = append(names, results...)
|
||||
}
|
||||
err := ct.nodeManager.DecrementHeightTo(names, height)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ct.height = height
|
||||
hash, err := ct.blockRepo.Get(height)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ct.merkleTrie.SetRoot(hash)
|
||||
return nil
|
||||
}
|
||||
|
||||
// MerkleHash returns the Merkle Hash of the claimTrie.
|
||||
func (ct *ClaimTrie) MerkleHash() *chainhash.Hash {
|
||||
if ct.height >= param.AllClaimsInMerkleForkHeight {
|
||||
return ct.merkleTrie.MerkleHashAllClaims()
|
||||
}
|
||||
return ct.merkleTrie.MerkleHash()
|
||||
}
|
||||
|
||||
// Height returns the current block height.
func (ct *ClaimTrie) Height() int32 {
	return ct.height
}
|
||||
|
||||
// Close persists states.
|
||||
// Any calls to the ClaimTrie after Close() being called results undefined behaviour.
|
||||
func (ct *ClaimTrie) Close() error {
|
||||
|
||||
for i := len(ct.cleanups) - 1; i >= 0; i-- {
|
||||
cleanup := ct.cleanups[i]
|
||||
err := cleanup()
|
||||
if err != nil { // TODO: it would be better to cleanup what we can than exit
|
||||
return fmt.Errorf("cleanup: %w", err)
|
||||
}
|
||||
}
|
||||
ct.cleanups = nil
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ct *ClaimTrie) forwardNodeChange(chg change.Change) error {
|
||||
|
||||
chg.Height = ct.Height() + 1
|
||||
|
||||
err := ct.nodeManager.AppendChange(chg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("node manager handle change: %w", err)
|
||||
}
|
||||
|
||||
//ct.changes = append(ct.changes, chg)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Node returns the node (with its claims and supports) for the given
// name, as resolved by the node manager.
func (ct *ClaimTrie) Node(name []byte) (*node.Node, error) {
	return ct.nodeManager.Node(name)
}
|
229
claimtrie/claimtrie_test.go
Normal file
229
claimtrie/claimtrie_test.go
Normal file
|
@ -0,0 +1,229 @@
|
|||
package claimtrie
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/claimtrie/merkletrie"
|
||||
"github.com/btcsuite/btcd/claimtrie/node"
|
||||
"github.com/btcsuite/btcd/claimtrie/param"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// setup points the claimtrie at the testnet parameters and a fresh
// per-test temporary data folder so tests do not share state.
func setup(t *testing.T) {
	param.SetNetwork(wire.TestNet, "")
	param.ClaimtrieDataFolder = t.TempDir()
}
|
||||
|
||||
// b is shorthand for converting a string literal to the []byte form
// used throughout these tests.
func b(s string) []byte {
	return []byte(s)
}
|
||||
|
||||
func buildTx(hash chainhash.Hash) *wire.MsgTx {
|
||||
tx := wire.NewMsgTx(1)
|
||||
txIn := wire.NewTxIn(wire.NewOutPoint(&hash, 0), nil, nil)
|
||||
tx.AddTxIn(txIn)
|
||||
tx.AddTxOut(wire.NewTxOut(0, nil))
|
||||
return tx
|
||||
}
|
||||
|
||||
// TestFixedHashes pins the merkle root produced by a known sequence of
// claims against a fixed, historically-verified hash.
func TestFixedHashes(t *testing.T) {

	r := require.New(t)

	setup(t)
	ct, err := New()
	r.NoError(err)
	defer func() {
		err = ct.Close()
		r.NoError(err)
	}()

	// A fresh trie hashes to the well-known empty-trie hash.
	r.Equal(merkletrie.EmptyTrieHash[:], ct.MerkleHash()[:])

	// Chain four txs so each has a distinct, deterministic outpoint.
	tx1 := buildTx(*merkletrie.EmptyTrieHash)
	tx2 := buildTx(tx1.TxHash())
	tx3 := buildTx(tx2.TxHash())
	tx4 := buildTx(tx3.TxHash())

	err = ct.AddClaim(b("test"), tx1.TxIn[0].PreviousOutPoint, node.NewClaimID(tx1.TxIn[0].PreviousOutPoint), 50, nil)
	r.NoError(err)

	err = ct.AddClaim(b("test2"), tx2.TxIn[0].PreviousOutPoint, node.NewClaimID(tx2.TxIn[0].PreviousOutPoint), 50, nil)
	r.NoError(err)

	err = ct.AddClaim(b("test"), tx3.TxIn[0].PreviousOutPoint, node.NewClaimID(tx3.TxIn[0].PreviousOutPoint), 50, nil)
	r.NoError(err)

	err = ct.AddClaim(b("tes"), tx4.TxIn[0].PreviousOutPoint, node.NewClaimID(tx4.TxIn[0].PreviousOutPoint), 50, nil)
	r.NoError(err)

	err = ct.AppendBlock()
	r.NoError(err)

	// Golden value: changing hashing or ordering logic must break this.
	expected, err := chainhash.NewHashFromStr("938fb93364bf8184e0b649c799ae27274e8db5221f1723c99fb2acd3386cfb00")
	r.NoError(err)
	r.Equal(expected[:], ct.MerkleHash()[:])
}
|
||||
|
||||
// TestNormalizationFork verifies that once the normalization fork height
// is reached, case/unicode variants of a name collapse into one node and
// the best claim is recomputed over the merged claim set.
func TestNormalizationFork(t *testing.T) {

	r := require.New(t)

	setup(t)
	param.NormalizedNameForkHeight = 2 // fork activates on the second block
	ct, err := New()
	r.NoError(err)
	r.NotNil(ct)
	defer func() {
		err = ct.Close()
		r.NoError(err)
	}()

	hash := chainhash.HashH([]byte{1, 2, 3})

	o1 := wire.OutPoint{Hash: hash, Index: 1}
	err = ct.AddClaim([]byte("AÑEJO"), o1, node.NewClaimID(o1), 10, nil)
	r.NoError(err)

	o2 := wire.OutPoint{Hash: hash, Index: 2}
	err = ct.AddClaim([]byte("AÑejo"), o2, node.NewClaimID(o2), 5, nil)
	r.NoError(err)

	o3 := wire.OutPoint{Hash: hash, Index: 3}
	err = ct.AddClaim([]byte("あてはまる"), o3, node.NewClaimID(o3), 5, nil)
	r.NoError(err)

	o4 := wire.OutPoint{Hash: hash, Index: 4}
	err = ct.AddClaim([]byte("Aḿlie"), o4, node.NewClaimID(o4), 5, nil)
	r.NoError(err)

	o5 := wire.OutPoint{Hash: hash, Index: 5}
	err = ct.AddClaim([]byte("TEST"), o5, node.NewClaimID(o5), 5, nil)
	r.NoError(err)

	o6 := wire.OutPoint{Hash: hash, Index: 6}
	err = ct.AddClaim([]byte("test"), o6, node.NewClaimID(o6), 7, nil)
	r.NoError(err)

	// Block 1: pre-fork — names are still distinct.
	err = ct.AppendBlock()
	r.NoError(err)
	r.NotEqual(merkletrie.EmptyTrieHash[:], ct.MerkleHash()[:])

	n, err := ct.nodeManager.Node([]byte("AÑEJO"))
	r.NoError(err)
	r.NotNil(n.BestClaim)
	r.Equal(int32(1), n.TakenOverAt)

	o7 := wire.OutPoint{Hash: hash, Index: 7}
	err = ct.AddClaim([]byte("aÑEJO"), o7, node.NewClaimID(o7), 8, nil)
	r.NoError(err)

	// Block 2: the fork takes effect; variants merge under "añejo".
	err = ct.AppendBlock()
	r.NoError(err)
	r.NotEqual(merkletrie.EmptyTrieHash[:], ct.MerkleHash()[:])

	n, err = ct.nodeManager.Node([]byte("añejo"))
	r.NoError(err)
	r.Equal(3, len(n.Claims))
	r.Equal(uint32(1), n.BestClaim.OutPoint.Index)
	r.Equal(int32(2), n.TakenOverAt)
}
|
||||
|
||||
// TestActivationsOnNormalizationFork checks that claims activate
// immediately at the normalization fork height (the bigger stake wins
// right away under the normalized name) and that ResetHeight rolls the
// takeover back to the pre-fork state.
func TestActivationsOnNormalizationFork(t *testing.T) {

	r := require.New(t)

	setup(t)
	param.NormalizedNameForkHeight = 4
	ct, err := New()
	r.NoError(err)
	r.NotNil(ct)
	defer func() {
		err = ct.Close()
		r.NoError(err)
	}()

	hash := chainhash.HashH([]byte{1, 2, 3})

	// Blocks 1-3: a lone claim on "A" is the best claim pre-fork.
	o7 := wire.OutPoint{Hash: hash, Index: 7}
	err = ct.AddClaim([]byte("A"), o7, node.NewClaimID(o7), 1, nil)
	r.NoError(err)
	err = ct.AppendBlock()
	r.NoError(err)
	err = ct.AppendBlock()
	r.NoError(err)
	err = ct.AppendBlock()
	r.NoError(err)
	verifyBestIndex(t, ct, "A", 7, 1)

	// Block 4 is the fork: the larger claim activates instantly on "a".
	o8 := wire.OutPoint{Hash: hash, Index: 8}
	err = ct.AddClaim([]byte("A"), o8, node.NewClaimID(o8), 2, nil)
	r.NoError(err)
	err = ct.AppendBlock()
	r.NoError(err)
	verifyBestIndex(t, ct, "a", 8, 2)

	// Further blocks keep the post-fork winner stable.
	err = ct.AppendBlock()
	r.NoError(err)
	err = ct.AppendBlock()
	r.NoError(err)
	verifyBestIndex(t, ct, "a", 8, 2)

	// Rolling back below the fork restores the pre-fork best claim.
	err = ct.ResetHeight(3)
	r.NoError(err)
	verifyBestIndex(t, ct, "A", 7, 1)
}
|
||||
|
||||
// TestNormalizationSortOrder locks in the historical (buggy) activation
// behavior at the normalization fork so consensus with the existing
// chain is preserved.
func TestNormalizationSortOrder(t *testing.T) {

	r := require.New(t)
	// this was an unfortunate bug; the normalization fork should not have activated anything
	// alas, it's now part of our history; we hereby test it to keep it that way
	setup(t)
	param.NormalizedNameForkHeight = 2
	ct, err := New()
	r.NoError(err)
	r.NotNil(ct)
	defer func() {
		err := ct.Close()
		r.NoError(err)
	}()

	hash := chainhash.HashH([]byte{1, 2, 3})

	o1 := wire.OutPoint{Hash: hash, Index: 1}
	err = ct.AddClaim([]byte("A"), o1, node.NewClaimID(o1), 1, nil)
	r.NoError(err)

	o2 := wire.OutPoint{Hash: hash, Index: 2}
	err = ct.AddClaim([]byte("A"), o2, node.NewClaimID(o2), 2, nil)
	r.NoError(err)

	o3 := wire.OutPoint{Hash: hash, Index: 3}
	err = ct.AddClaim([]byte("a"), o3, node.NewClaimID(o3), 3, nil)
	r.NoError(err)

	// Block 1: pre-fork, "A" and "a" are separate nodes.
	err = ct.AppendBlock()
	r.NoError(err)
	verifyBestIndex(t, ct, "A", 2, 2)
	verifyBestIndex(t, ct, "a", 3, 1)

	// Block 2: the fork merges them; the historical winner is index 3.
	err = ct.AppendBlock()
	r.NoError(err)
	verifyBestIndex(t, ct, "a", 3, 3)
}
|
||||
|
||||
// verifyBestIndex asserts that the node for name holds exactly `claims`
// claims and (when any exist) that its best claim's outpoint index is idx.
func verifyBestIndex(t *testing.T, ct *ClaimTrie, name string, idx uint32, claims int) {

	r := require.New(t)

	n, err := ct.nodeManager.Node([]byte(name))
	r.NoError(err)
	r.Equal(claims, len(n.Claims))
	if claims > 0 {
		r.Equal(idx, n.BestClaim.OutPoint.Index)
	}
}
|
165
claimtrie/cmd/cmd/block.go
Normal file
165
claimtrie/cmd/cmd/block.go
Normal file
|
@ -0,0 +1,165 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
|
||||
"github.com/btcsuite/btcd/claimtrie/block/blockrepo"
|
||||
"github.com/btcsuite/btcd/claimtrie/config"
|
||||
"github.com/btcsuite/btcd/claimtrie/merkletrie"
|
||||
"github.com/btcsuite/btcd/claimtrie/merkletrie/merkletrierepo"
|
||||
"github.com/btcsuite/btcd/claimtrie/param"
|
||||
"github.com/btcsuite/btcd/claimtrie/temporal/temporalrepo"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// localConfig holds the repo paths shared by every subcommand; it is
// populated once in init().
var localConfig *config.DBConfig

// init wires the claimtrie CLI to mainnet parameters and registers the
// block subcommands. NOTE(review): this init has ordering side effects
// (param.SetNetwork before GenerateConfig) — keep that order.
func init() {
	param.SetNetwork(wire.MainNet, "mainnet")
	localConfig = config.GenerateConfig(param.ClaimtrieDataFolder)

	rootCmd.AddCommand(blockCmd)

	blockCmd.AddCommand(blockLastCmd)
	blockCmd.AddCommand(blockListCmd)
	blockCmd.AddCommand(blockNameCmd)
}
|
||||
|
||||
// blockCmd is the parent "block" command; it only groups the
// last/list/vertex subcommands registered in init().
var blockCmd = &cobra.Command{
	Use:   "block",
	Short: "Block related commands",
}
|
||||
|
||||
var blockLastCmd = &cobra.Command{
|
||||
Use: "last",
|
||||
Short: "Show the Merkle Hash of the last block",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
repo, err := blockrepo.NewPebble(localConfig.ReportedBlockRepoPebble.Path)
|
||||
if err != nil {
|
||||
log.Fatalf("can't open reported block repo: %s", err)
|
||||
}
|
||||
|
||||
last, err := repo.Load()
|
||||
if err != nil {
|
||||
return fmt.Errorf("load previous height")
|
||||
}
|
||||
|
||||
hash, err := repo.Get(last)
|
||||
if err != nil {
|
||||
return fmt.Errorf("load changes from repo: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("blk %-7d: %s\n", last, hash.String())
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var blockListCmd = &cobra.Command{
|
||||
Use: "list <from_height> [<to_height>]",
|
||||
Short: "List the Merkle Hash of block in a range of heights",
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
repo, err := blockrepo.NewPebble(localConfig.ReportedBlockRepoPebble.Path)
|
||||
if err != nil {
|
||||
log.Fatalf("can't open reported block repo: %s", err)
|
||||
}
|
||||
|
||||
fromHeight, err := strconv.Atoi(args[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid args")
|
||||
}
|
||||
|
||||
toHeight := fromHeight + 1
|
||||
if len(args) == 2 {
|
||||
toHeight, err = strconv.Atoi(args[1])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid args")
|
||||
}
|
||||
}
|
||||
|
||||
last, err := repo.Load()
|
||||
if err != nil {
|
||||
return fmt.Errorf("load previous height")
|
||||
}
|
||||
|
||||
if toHeight >= int(last) {
|
||||
toHeight = int(last)
|
||||
}
|
||||
|
||||
for i := fromHeight; i < toHeight; i++ {
|
||||
hash, err := repo.Get(int32(i))
|
||||
if err != nil {
|
||||
return fmt.Errorf("load changes from repo: %w", err)
|
||||
}
|
||||
fmt.Printf("blk %-7d: %s\n", i, hash.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var blockNameCmd = &cobra.Command{
|
||||
Use: "vertex <height> [<name>]",
|
||||
Short: "List the claim and child hashes at vertex name of block at height",
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
repo, err := blockrepo.NewPebble(localConfig.BlockRepoPebble.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't open reported block repo: %w", err)
|
||||
}
|
||||
defer repo.Close()
|
||||
|
||||
height, err := strconv.Atoi(args[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid args")
|
||||
}
|
||||
|
||||
last, err := repo.Load()
|
||||
if err != nil {
|
||||
return fmt.Errorf("load previous height: %w", err)
|
||||
}
|
||||
|
||||
if last < int32(height) {
|
||||
return fmt.Errorf("requested height is unavailable")
|
||||
}
|
||||
|
||||
hash, err := repo.Get(int32(height))
|
||||
if err != nil {
|
||||
return fmt.Errorf("load previous height: %w", err)
|
||||
}
|
||||
|
||||
trieRepo, err := merkletrierepo.NewPebble(localConfig.MerkleTrieRepoPebble.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't open merkle trie repo: %w", err)
|
||||
}
|
||||
|
||||
trie := merkletrie.New(nil, trieRepo)
|
||||
defer trie.Close()
|
||||
trie.SetRoot(hash)
|
||||
if len(args) > 1 {
|
||||
trie.Dump(args[1], param.AllClaimsInMerkleForkHeight >= int32(height))
|
||||
} else {
|
||||
tmpRepo, err := temporalrepo.NewPebble(localConfig.TemporalRepoPebble.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't open temporal repo: %w", err)
|
||||
}
|
||||
nodes, err := tmpRepo.NodesAt(int32(height))
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't read temporal repo at %d: %w", height, err)
|
||||
}
|
||||
for _, name := range nodes {
|
||||
fmt.Printf("Name: %s, ", string(name))
|
||||
trie.Dump(string(name), param.AllClaimsInMerkleForkHeight >= int32(height))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
200
claimtrie/cmd/cmd/chain.go
Normal file
200
claimtrie/cmd/cmd/chain.go
Normal file
|
@ -0,0 +1,200 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/btcsuite/btcd/claimtrie"
|
||||
"github.com/btcsuite/btcd/claimtrie/block"
|
||||
"github.com/btcsuite/btcd/claimtrie/block/blockrepo"
|
||||
"github.com/btcsuite/btcd/claimtrie/chain/chainrepo"
|
||||
"github.com/btcsuite/btcd/claimtrie/change"
|
||||
"github.com/btcsuite/btcd/claimtrie/node"
|
||||
|
||||
"github.com/cockroachdb/pebble"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// init wires the chain sub-commands (dump, replay) into the root command.
func init() {
	rootCmd.AddCommand(chainCmd)

	chainCmd.AddCommand(chainDumpCmd)
	chainCmd.AddCommand(chainReplayCmd)
}
|
||||
|
||||
// chainCmd groups the chain-related sub-commands; it does nothing by itself.
var chainCmd = &cobra.Command{
	Use:   "chain",
	Short: "chain related command",
}
|
||||
|
||||
var chainDumpCmd = &cobra.Command{
|
||||
Use: "dump <fromHeight> [<toHeight>]",
|
||||
Short: "dump changes from <fromHeight> to [<toHeight>]",
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
fromHeight, err := strconv.Atoi(args[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid args")
|
||||
}
|
||||
|
||||
toHeight := fromHeight + 1
|
||||
if len(args) == 2 {
|
||||
toHeight, err = strconv.Atoi(args[1])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid args")
|
||||
}
|
||||
}
|
||||
|
||||
chainRepo, err := chainrepo.NewPebble(localConfig.ChainRepoPebble.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open node repo: %w", err)
|
||||
}
|
||||
|
||||
for height := fromHeight; height < toHeight; height++ {
|
||||
changes, err := chainRepo.Load(int32(height))
|
||||
if err == pebble.ErrNotFound {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("load commands: %w", err)
|
||||
}
|
||||
|
||||
for _, chg := range changes {
|
||||
if int(chg.Height) > height {
|
||||
break
|
||||
}
|
||||
showChange(chg)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var chainReplayCmd = &cobra.Command{
|
||||
Use: "replay <height>",
|
||||
Short: "Replay the chain up to <height>",
|
||||
Args: cobra.RangeArgs(0, 1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
fmt.Printf("not working until we pass record flag to claimtrie\n")
|
||||
|
||||
fromHeight := 2
|
||||
toHeight := int(math.MaxInt32)
|
||||
|
||||
var err error
|
||||
if len(args) == 1 {
|
||||
toHeight, err = strconv.Atoi(args[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid args")
|
||||
}
|
||||
}
|
||||
|
||||
err = os.RemoveAll(localConfig.NodeRepoPebble.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("delete node repo: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Deleted node repo\n")
|
||||
|
||||
chainRepo, err := chainrepo.NewPebble(localConfig.ChainRepoPebble.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open change repo: %w", err)
|
||||
}
|
||||
|
||||
reportedBlockRepo, err := blockrepo.NewPebble(localConfig.ReportedBlockRepoPebble.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open block repo: %w", err)
|
||||
}
|
||||
|
||||
// FIXME: pass record flag into claimtrie
|
||||
ct, err := claimtrie.New()
|
||||
if err != nil {
|
||||
return fmt.Errorf("create claimtrie: %w", err)
|
||||
}
|
||||
defer ct.Close()
|
||||
|
||||
err = ct.ResetHeight(int32(fromHeight - 1))
|
||||
if err != nil {
|
||||
return fmt.Errorf("reset claimtrie height: %w", err)
|
||||
}
|
||||
|
||||
for height := int32(fromHeight); height < int32(toHeight); height++ {
|
||||
|
||||
changes, err := chainRepo.Load(height)
|
||||
if err == pebble.ErrNotFound {
|
||||
// do nothing.
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("load from change repo: %w", err)
|
||||
}
|
||||
|
||||
for _, chg := range changes {
|
||||
claimID, _ := node.NewIDFromString(chg.ClaimID)
|
||||
|
||||
switch chg.Type {
|
||||
case change.AddClaim:
|
||||
op := *node.NewOutPointFromString(chg.OutPoint)
|
||||
err = ct.AddClaim(chg.Name, op, claimID, chg.Amount, chg.Value)
|
||||
|
||||
case change.UpdateClaim:
|
||||
op := *node.NewOutPointFromString(chg.OutPoint)
|
||||
err = ct.UpdateClaim(chg.Name, op, chg.Amount, claimID, chg.Value)
|
||||
|
||||
case change.SpendClaim:
|
||||
op := *node.NewOutPointFromString(chg.OutPoint)
|
||||
err = ct.SpendClaim(chg.Name, op, claimID)
|
||||
|
||||
case change.AddSupport:
|
||||
op := *node.NewOutPointFromString(chg.OutPoint)
|
||||
claimID, _ := node.NewIDFromString(chg.ClaimID)
|
||||
id := claimID
|
||||
err = ct.AddSupport(chg.Name, chg.Value, op, chg.Amount, id)
|
||||
|
||||
case change.SpendSupport:
|
||||
op := *node.NewOutPointFromString(chg.OutPoint)
|
||||
err = ct.SpendSupport(chg.Name, op, claimID)
|
||||
|
||||
default:
|
||||
err = fmt.Errorf("invalid change: %v", chg)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("execute change %v: %w", chg, err)
|
||||
}
|
||||
}
|
||||
err = appendBlock(ct, reportedBlockRepo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ct.Height()%1000 == 0 {
|
||||
fmt.Printf("block: %d\n", ct.Height())
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func appendBlock(ct *claimtrie.ClaimTrie, blockRepo block.Repo) error {
|
||||
|
||||
err := ct.AppendBlock()
|
||||
if err != nil {
|
||||
return fmt.Errorf("append block: %w", err)
|
||||
}
|
||||
|
||||
height := ct.Height()
|
||||
|
||||
hash, err := blockRepo.Get(height)
|
||||
if err != nil {
|
||||
return fmt.Errorf("load from block repo: %w", err)
|
||||
}
|
||||
|
||||
if *ct.MerkleHash() != *hash {
|
||||
return fmt.Errorf("hash mismatched at height %5d: exp: %s, got: %s", height, hash, ct.MerkleHash())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
108
claimtrie/cmd/cmd/node.go
Normal file
108
claimtrie/cmd/cmd/node.go
Normal file
|
@ -0,0 +1,108 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
|
||||
"github.com/btcsuite/btcd/claimtrie/config"
|
||||
"github.com/btcsuite/btcd/claimtrie/node"
|
||||
"github.com/btcsuite/btcd/claimtrie/node/noderepo"
|
||||
"github.com/btcsuite/btcd/claimtrie/param"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// init selects mainnet parameters, builds the repo path config, and wires the
// node sub-commands into the root command.
func init() {
	// NOTE(review): the same two setup calls also run in temporal.go's init;
	// redundant but apparently harmless — confirm ordering doesn't matter.
	param.SetNetwork(wire.MainNet, "mainnet")
	localConfig = config.GenerateConfig(param.ClaimtrieDataFolder)
	rootCmd.AddCommand(nodeCmd)

	nodeCmd.AddCommand(nodeDumpCmd)
	nodeCmd.AddCommand(nodeReplayCmd)
}
|
||||
|
||||
// nodeCmd groups the node-related sub-commands (dump, replay).
var nodeCmd = &cobra.Command{
	Use:   "node",
	Short: "Replay the application of changes on a node up to certain height",
}
|
||||
|
||||
var nodeDumpCmd = &cobra.Command{
|
||||
Use: "dump <node_name> [<height>]",
|
||||
Short: "Replay the application of changes on a node up to certain height",
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
repo, err := noderepo.NewPebble(localConfig.NodeRepoPebble.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open node repo: %w", err)
|
||||
}
|
||||
|
||||
name := args[0]
|
||||
height := math.MaxInt32
|
||||
|
||||
if len(args) == 2 {
|
||||
height, err = strconv.Atoi(args[1])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid args")
|
||||
}
|
||||
}
|
||||
|
||||
changes, err := repo.LoadChanges([]byte(name))
|
||||
if err != nil {
|
||||
return fmt.Errorf("load commands: %w", err)
|
||||
}
|
||||
|
||||
for _, chg := range changes {
|
||||
if int(chg.Height) > height {
|
||||
break
|
||||
}
|
||||
showChange(chg)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var nodeReplayCmd = &cobra.Command{
|
||||
Use: "replay <node_name> [<height>]",
|
||||
Short: "Replay the application of changes on a node up to certain height",
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
repo, err := noderepo.NewPebble(localConfig.NodeRepoPebble.Path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open node repo: %w", err)
|
||||
}
|
||||
|
||||
name := []byte(args[0])
|
||||
height := math.MaxInt32
|
||||
|
||||
if len(args) == 2 {
|
||||
height, err = strconv.Atoi(args[1])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid args")
|
||||
}
|
||||
}
|
||||
|
||||
nm, err := node.NewBaseManager(repo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create node manager: %w", err)
|
||||
}
|
||||
nm = node.NewNormalizingManager(nm)
|
||||
|
||||
_, err = nm.IncrementHeightTo(int32(height))
|
||||
if err != nil {
|
||||
return fmt.Errorf("increment height: %w", err)
|
||||
}
|
||||
|
||||
n, err := nm.Node(name)
|
||||
if err != nil || n == nil {
|
||||
return fmt.Errorf("get node: %w", err)
|
||||
}
|
||||
|
||||
showNode(n)
|
||||
return nil
|
||||
},
|
||||
}
|
15
claimtrie/cmd/cmd/root.go
Normal file
15
claimtrie/cmd/cmd/root.go
Normal file
|
@ -0,0 +1,15 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// rootCmd is the top-level "claimtrie" command; the other files in this
// package attach their sub-commands to it from their init functions.
var rootCmd = &cobra.Command{
	Use:          "claimtrie",
	Short:        "ClaimTrie Command Line Interface",
	SilenceUsage: true, // errors are reported without re-printing usage
}
|
||||
|
||||
func Execute() {
|
||||
rootCmd.Execute() // nolint : errchk
|
||||
}
|
67
claimtrie/cmd/cmd/temporal.go
Normal file
67
claimtrie/cmd/cmd/temporal.go
Normal file
|
@ -0,0 +1,67 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
|
||||
"github.com/btcsuite/btcd/claimtrie/config"
|
||||
"github.com/btcsuite/btcd/claimtrie/param"
|
||||
"github.com/btcsuite/btcd/claimtrie/temporal/temporalrepo"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// init selects mainnet parameters, builds the repo path config, and registers
// the temporal command.
func init() {
	// NOTE(review): duplicates the setup in node.go's init — confirm the
	// double invocation is intentional.
	param.SetNetwork(wire.MainNet, "mainnet")
	localConfig = config.GenerateConfig(param.ClaimtrieDataFolder)
	rootCmd.AddCommand(temporalCmd)
}
|
||||
|
||||
var temporalCmd = &cobra.Command{
|
||||
Use: "temporal <from_height> [<to_height>]]",
|
||||
Short: "List which nodes are update in a range of heights",
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
RunE: runListNodes,
|
||||
}
|
||||
|
||||
func runListNodes(cmd *cobra.Command, args []string) error {
|
||||
|
||||
repo, err := temporalrepo.NewPebble(localConfig.TemporalRepoPebble.Path)
|
||||
if err != nil {
|
||||
log.Fatalf("can't open reported block repo: %s", err)
|
||||
}
|
||||
|
||||
fromHeight, err := strconv.Atoi(args[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid args")
|
||||
}
|
||||
|
||||
toHeight := fromHeight + 1
|
||||
if len(args) == 2 {
|
||||
toHeight, err = strconv.Atoi(args[1])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid args")
|
||||
}
|
||||
}
|
||||
|
||||
for height := fromHeight; height < toHeight; height++ {
|
||||
names, err := repo.NodesAt(int32(height))
|
||||
if err != nil {
|
||||
return fmt.Errorf("get node names from temporal")
|
||||
}
|
||||
|
||||
if len(names) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Printf("%7d: %q", height, names[0])
|
||||
for _, name := range names[1:] {
|
||||
fmt.Printf(", %q ", name)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
68
claimtrie/cmd/cmd/ui.go
Normal file
68
claimtrie/cmd/cmd/ui.go
Normal file
|
@ -0,0 +1,68 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/btcsuite/btcd/claimtrie/change"
|
||||
"github.com/btcsuite/btcd/claimtrie/node"
|
||||
)
|
||||
|
||||
// status maps a node.Status value to its display name.
var status = map[node.Status]string{
	node.Accepted:    "Accepted",
	node.Activated:   "Activated",
	node.Deactivated: "Deactivated",
}
|
||||
|
||||
func changeName(c change.ChangeType) string {
|
||||
switch c { // can't this be done via reflection?
|
||||
case change.AddClaim:
|
||||
return "AddClaim"
|
||||
case change.SpendClaim:
|
||||
return "SpendClaim"
|
||||
case change.UpdateClaim:
|
||||
return "UpdateClaim"
|
||||
case change.AddSupport:
|
||||
return "AddSupport"
|
||||
case change.SpendSupport:
|
||||
return "SpendSupport"
|
||||
}
|
||||
return "Unknown"
|
||||
}
|
||||
|
||||
// showChange prints a one-line summary of a change record.
// NOTE(review): the %04s width is a no-op for full-length claim IDs — confirm
// what padding was intended here.
func showChange(chg change.Change) {
	fmt.Printf(">>> Height: %6d: %s for %04s, %d, %s\n",
		chg.Height, changeName(chg.Type), chg.ClaimID, chg.Amount, chg.OutPoint)
}
|
||||
|
||||
// showClaim prints one claim of node n; the node's best claim is marked with
// an asterisk.
func showClaim(c *node.Claim, n *node.Node) {
	mark := " " // "*" marks the node's best claim
	if c == n.BestClaim {
		mark = "*"
	}

	fmt.Printf("%s C ID: %s, TXO: %s\n %5d/%-5d, Status: %9s, Amount: %15d, Effective Amount: %15d\n",
		mark, c.ClaimID, c.OutPoint, c.AcceptedAt, c.ActiveAt, status[c.Status], c.Amount, c.EffectiveAmount(n.Supports))
}
|
||||
|
||||
// showSupport prints a one-line summary of a support (a *node.Claim used as a
// support record).
func showSupport(c *node.Claim) {
	fmt.Printf(" S id: %s, op: %s, %5d/%-5d, %9s, amt: %15d\n",
		c.ClaimID, c.OutPoint, c.AcceptedAt, c.ActiveAt, status[c.Status], c.Amount)
}
|
||||
|
||||
// showNode prints a node's takeover height, then each of its claims (sorted)
// followed by the supports attached to that claim.
func showNode(n *node.Node) {

	fmt.Printf("%s\n", strings.Repeat("-", 200))
	fmt.Printf("Last Node Takeover: %d\n\n", n.TakenOverAt)
	n.SortClaims()
	for _, c := range n.Claims {
		showClaim(c, n)
		for _, s := range n.Supports {
			if s.ClaimID != c.ClaimID {
				continue // this support belongs to a different claim
			}
			showSupport(s)
		}
	}
	fmt.Printf("\n\n")
}
|
9
claimtrie/cmd/main.go
Normal file
9
claimtrie/cmd/main.go
Normal file
|
@ -0,0 +1,9 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/claimtrie/cmd/cmd"
|
||||
)
|
||||
|
||||
// main dispatches to the claimtrie CLI defined in the cmd package.
func main() {
	cmd.Execute()
}
|
40
claimtrie/config/config.go
Normal file
40
claimtrie/config/config.go
Normal file
|
@ -0,0 +1,40 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func GenerateConfig(folder string) *DBConfig {
|
||||
return &DBConfig{
|
||||
BlockRepoPebble: pebbleConfig{
|
||||
Path: filepath.Join(folder, "blocks_pebble_db"),
|
||||
},
|
||||
NodeRepoPebble: pebbleConfig{
|
||||
Path: filepath.Join(folder, "node_change_pebble_db"),
|
||||
},
|
||||
TemporalRepoPebble: pebbleConfig{
|
||||
Path: filepath.Join(folder, "temporal_pebble_db"),
|
||||
},
|
||||
MerkleTrieRepoPebble: pebbleConfig{
|
||||
Path: filepath.Join(folder, "merkletrie_pebble_db"),
|
||||
},
|
||||
ReportedBlockRepoPebble: pebbleConfig{
|
||||
Path: filepath.Join(folder, "reported_blocks_pebble_db"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// DBConfig is the container of all configurations.
|
||||
type DBConfig struct {
|
||||
BlockRepoPebble pebbleConfig
|
||||
NodeRepoPebble pebbleConfig
|
||||
TemporalRepoPebble pebbleConfig
|
||||
MerkleTrieRepoPebble pebbleConfig
|
||||
|
||||
ChainRepoPebble pebbleConfig
|
||||
ReportedBlockRepoPebble pebbleConfig
|
||||
}
|
||||
|
||||
type pebbleConfig struct {
|
||||
Path string
|
||||
}
|
28
claimtrie/log.go
Normal file
28
claimtrie/log.go
Normal file
|
@ -0,0 +1,28 @@
|
|||
package claimtrie
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btclog"
|
||||
)
|
||||
|
||||
// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log btclog.Logger

// The default amount of logging is none.
func init() {
	DisableLog()
}

// DisableLog disables all library log output. Logging output is disabled
// by default until either UseLogger or SetLogWriter are called.
func DisableLog() {
	log = btclog.Disabled
}

// UseLogger uses a specified Logger to output package logging info.
// This should be used in preference to SetLogWriter if the caller is also
// using btclog.
func UseLogger(logger btclog.Logger) {
	log = logger
}
|
29
claimtrie/merkletrie/hashfunc.go
Normal file
29
claimtrie/merkletrie/hashfunc.go
Normal file
|
@ -0,0 +1,29 @@
|
|||
package merkletrie
|
||||
|
||||
import "github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
|
||||
func hashMerkleBranches(left *chainhash.Hash, right *chainhash.Hash) *chainhash.Hash {
|
||||
// Concatenate the left and right nodes.
|
||||
var hash [chainhash.HashSize * 2]byte
|
||||
copy(hash[:chainhash.HashSize], left[:])
|
||||
copy(hash[chainhash.HashSize:], right[:])
|
||||
|
||||
newHash := chainhash.DoubleHashH(hash[:])
|
||||
return &newHash
|
||||
}
|
||||
|
||||
// computeMerkleRoot folds a list of hashes into a single merkle root by
// repeatedly hashing adjacent pairs; an odd-length level duplicates its last
// element first. Returns nil for an empty input.
//
// NOTE(review): the input slice is used as scratch space — its elements are
// overwritten (and it may be extended in place), so callers must not rely on
// the slice contents after this returns.
func computeMerkleRoot(hashes []*chainhash.Hash) *chainhash.Hash {
	if len(hashes) <= 0 {
		return nil
	}
	for len(hashes) > 1 {
		if (len(hashes) & 1) > 0 { // odd count
			hashes = append(hashes, hashes[len(hashes)-1])
		}
		for i := 0; i < len(hashes); i += 2 { // TODO: parallelize this loop (or use a lib that does it)
			// Write each pair's parent into the front half of the slice.
			hashes[i>>1] = hashMerkleBranches(hashes[i], hashes[i+1])
		}
		hashes = hashes[:len(hashes)>>1]
	}
	return hashes[0]
}
|
275
claimtrie/merkletrie/merkletrie.go
Normal file
275
claimtrie/merkletrie/merkletrie.go
Normal file
|
@ -0,0 +1,275 @@
|
|||
package merkletrie
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/cockroachdb/pebble"
|
||||
)
|
||||
|
||||
var (
	// EmptyTrieHash represents the Merkle Hash of an empty MerkleTrie.
	// "0000000000000000000000000000000000000000000000000000000000000001"
	EmptyTrieHash = &chainhash.Hash{1}
	// NoChildrenHash stands in for the child subtree when a vertex has claims
	// but no child hashes (see merkleAllClaims).
	NoChildrenHash = &chainhash.Hash{2}
	// NoClaimsHash stands in for the claims subtree when a vertex has
	// children but no claims (see merkleAllClaims).
	NoClaimsHash = &chainhash.Hash{3}
)
|
||||
|
||||
// ValueStore enables MerkleTrie to query node values from different implementations.
type ValueStore interface {
	// ClaimHashes returns the individual claim hashes stored at name.
	ClaimHashes(name []byte) []*chainhash.Hash
	// Hash returns the hash for the value stored at name, or nil when none.
	Hash(name []byte) *chainhash.Hash
}
|
||||
|
||||
// MerkleTrie implements a 256-way prefix tree.
type MerkleTrie struct {
	store ValueStore // supplies claim hashes for names at the leaves
	repo  Repo       // persists serialized vertices, keyed by prefix+hash

	root *vertex
	bufs *sync.Pool // pool of *bytes.Buffer scratch space used while hashing
}
|
||||
|
||||
// New returns a MerkleTrie.
|
||||
func New(store ValueStore, repo Repo) *MerkleTrie {
|
||||
|
||||
tr := &MerkleTrie{
|
||||
store: store,
|
||||
repo: repo,
|
||||
bufs: &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(bytes.Buffer)
|
||||
},
|
||||
},
|
||||
root: newVertex(EmptyTrieHash),
|
||||
}
|
||||
|
||||
return tr
|
||||
}
|
||||
|
||||
// SetRoot drops all resolved nodes in the MerkleTrie, and sets the root with
// the specified hash; children are re-resolved lazily from the repo.
func (t *MerkleTrie) SetRoot(h *chainhash.Hash) {
	t.root = newVertex(h)
}
|
||||
|
||||
// Update updates the nodes along the path to the key.
// Each node is resolved or created with their Hash cleared.
func (t *MerkleTrie) Update(name []byte, restoreChildren bool) {

	n := t.root
	for i, ch := range name {
		// Re-load children that a previous hashing pass pruned from RAM.
		if restoreChildren && len(n.childLinks) == 0 {
			t.resolveChildLinks(n, name[:i])
		}
		if n.childLinks[ch] == nil {
			n.childLinks[ch] = newVertex(nil)
		}
		n.merkleHash = nil // invalidate the cached hash along the whole path
		n = n.childLinks[ch]
	}

	if restoreChildren && len(n.childLinks) == 0 {
		t.resolveChildLinks(n, name)
	}
	// Mark the terminal vertex dirty: it carries a value whose claims hash
	// must be recomputed on the next hashing pass.
	n.hasValue = true
	n.merkleHash = nil
	n.claimsHash = nil
}
|
||||
|
||||
// resolveChildLinks updates the links on n by loading its serialized form
// from the repo, keyed by the vertex's prefix (key) concatenated with its
// merkle hash. A vertex with no cached hash has nothing persisted and is
// left untouched.
func (t *MerkleTrie) resolveChildLinks(n *vertex, key []byte) {

	if n.merkleHash == nil {
		return
	}

	b := t.bufs.Get().(*bytes.Buffer)
	defer t.bufs.Put(b)
	b.Reset()
	b.Write(key)
	b.Write(n.merkleHash[:])

	result, closer, err := t.repo.Get(b.Bytes())
	if err == pebble.ErrNotFound { // TODO: leaky abstraction
		return
	} else if err != nil {
		panic(err)
	}
	defer closer.Close()

	// Decode the persisted vertex: (byte, hash) child entries plus an
	// optional trailing claims hash (see nbuf).
	nb := nbuf(result)
	n.hasValue, n.claimsHash = nb.hasValue()
	for i := 0; i < nb.entries(); i++ {
		p, h := nb.entry(i)
		n.childLinks[p] = newVertex(h)
	}
}
|
||||
|
||||
// MerkleHash returns the Merkle Hash of the MerkleTrie.
|
||||
// All nodes must have been resolved before calling this function.
|
||||
func (t *MerkleTrie) MerkleHash() *chainhash.Hash {
|
||||
buf := make([]byte, 0, 256)
|
||||
if h := t.merkle(buf, t.root); h == nil {
|
||||
return EmptyTrieHash
|
||||
}
|
||||
return t.root.merkleHash
|
||||
}
|
||||
|
||||
// merkle recursively resolves the hashes of the node.
// All nodes must have been resolved before calling this function.
// It returns nil for a vertex with neither children nor claims; settled
// subtrees deeper than 4 bytes are pruned from RAM after hashing.
func (t *MerkleTrie) merkle(prefix []byte, v *vertex) *chainhash.Hash {
	if v.merkleHash != nil {
		return v.merkleHash // already cached; subtree unchanged
	}

	b := t.bufs.Get().(*bytes.Buffer)
	defer t.bufs.Put(b)
	b.Reset()

	keys := keysInOrder(v)

	for _, ch := range keys {
		child := v.childLinks[ch]
		if child == nil {
			continue
		}
		p := append(prefix, ch)
		h := t.merkle(p, child)
		if h != nil {
			// Serialize this child entry: one prefix byte plus its hash.
			b.WriteByte(ch) // nolint : errchk
			b.Write(h[:])   // nolint : errchk
		}
		if h == nil || len(prefix) > 4 { // TODO: determine the right number here
			delete(v.childLinks, ch) // keep the RAM down (they get recreated on Update)
		}
	}

	if v.hasValue {
		claimHash := v.claimsHash
		if claimHash == nil {
			claimHash = t.store.Hash(prefix)
			v.claimsHash = claimHash
		}
		if claimHash != nil {
			b.Write(claimHash[:])
		} else {
			// The store has no value here after all; drop the marker.
			v.hasValue = false
		}
	}

	if b.Len() > 0 {
		h := chainhash.DoubleHashH(b.Bytes())
		v.merkleHash = &h
		// Persist under prefix+hash so resolveChildLinks can read it back.
		// NOTE(review): the repo.Set error is ignored here.
		t.repo.Set(append(prefix, h[:]...), b.Bytes())
	}

	return v.merkleHash
}
|
||||
|
||||
func keysInOrder(v *vertex) []byte {
|
||||
keys := make([]byte, 0, len(v.childLinks))
|
||||
for key := range v.childLinks {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
|
||||
return keys
|
||||
}
|
||||
|
||||
func (t *MerkleTrie) MerkleHashAllClaims() *chainhash.Hash {
|
||||
buf := make([]byte, 0, 256)
|
||||
if h := t.merkleAllClaims(buf, t.root); h == nil {
|
||||
return EmptyTrieHash
|
||||
}
|
||||
return t.root.merkleHash
|
||||
}
|
||||
|
||||
// merkleAllClaims recursively computes v's hash, combining a merkle root of
// the child hashes with a merkle root of the claim hashes at this name.
// A vertex with a single child and no claims passes the child's hash up
// unchanged; settled subtrees deeper than 4 bytes are pruned from RAM.
func (t *MerkleTrie) merkleAllClaims(prefix []byte, v *vertex) *chainhash.Hash {
	if v.merkleHash != nil {
		return v.merkleHash // already cached; subtree unchanged
	}
	b := t.bufs.Get().(*bytes.Buffer)
	defer t.bufs.Put(b)
	b.Reset()

	keys := keysInOrder(v)
	childHashes := make([]*chainhash.Hash, 0, len(keys))
	for _, ch := range keys {
		n := v.childLinks[ch]
		if n == nil {
			continue
		}
		p := append(prefix, ch)
		h := t.merkleAllClaims(p, n)
		if h != nil {
			childHashes = append(childHashes, h)
			// Serialize this child entry: one prefix byte plus its hash.
			b.WriteByte(ch) // nolint : errchk
			b.Write(h[:])   // nolint : errchk
		}
		if h == nil || len(prefix) > 4 { // TODO: determine the right number here
			delete(v.childLinks, ch) // keep the RAM down (they get recreated on Update)
		}
	}

	var claimsHash *chainhash.Hash
	if v.hasValue {
		claimsHash = v.claimsHash
		if claimsHash == nil {
			claimHashes := t.store.ClaimHashes(prefix)
			if len(claimHashes) > 0 {
				claimsHash = computeMerkleRoot(claimHashes)
				v.claimsHash = claimsHash
			} else {
				// No claims actually live here; drop the value marker.
				v.hasValue = false
			}
		}
	}

	if len(childHashes) > 1 || claimsHash != nil { // yeah, about that 1 there -- old code used the condensed trie
		// Hash the (children, claims) pair, substituting sentinels for the
		// missing side.
		left := NoChildrenHash
		if len(childHashes) > 0 {
			left = computeMerkleRoot(childHashes)
		}
		right := NoClaimsHash
		if claimsHash != nil {
			b.Write(claimsHash[:]) // for Has Value, nolint : errchk
			right = claimsHash
		}

		h := hashMerkleBranches(left, right)
		v.merkleHash = h
		// NOTE(review): the repo.Set errors below are ignored.
		t.repo.Set(append(prefix, h[:]...), b.Bytes())
	} else if len(childHashes) == 1 {
		v.merkleHash = childHashes[0] // pass it up the tree
		t.repo.Set(append(prefix, v.merkleHash[:]...), b.Bytes())
	}

	return v.merkleHash
}
|
||||
|
||||
// Close closes the underlying repo.
func (t *MerkleTrie) Close() error {
	return t.repo.Close()
}
|
||||
|
||||
func (t *MerkleTrie) Dump(s string, allClaims bool) {
|
||||
v := t.root
|
||||
|
||||
for i := 0; i < len(s); i++ {
|
||||
t.resolveChildLinks(v, []byte(s[:i]))
|
||||
ch := s[i]
|
||||
v = v.childLinks[ch]
|
||||
if v == nil {
|
||||
fmt.Printf("Missing child at %s\n", s[:i+1])
|
||||
return
|
||||
}
|
||||
}
|
||||
t.resolveChildLinks(v, []byte(s))
|
||||
|
||||
fmt.Printf("Node hash: %s, has value: %t\n", v.merkleHash.String(), v.hasValue)
|
||||
|
||||
for key, value := range v.childLinks {
|
||||
fmt.Printf(" Child %s hash: %s\n", string(key), value.merkleHash.String())
|
||||
}
|
||||
}
|
24
claimtrie/merkletrie/merkletrie_test.go
Normal file
24
claimtrie/merkletrie/merkletrie_test.go
Normal file
|
@ -0,0 +1,24 @@
|
|||
package merkletrie
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestName checks computeMerkleRoot: a single hash is its own root, and the
// three sentinel hashes fold to a known target value.
func TestName(t *testing.T) {

	r := require.New(t)

	target, _ := chainhash.NewHashFromStr("e9ffb584c62449f157c8be88257bd1eebb2d8ef824f5c86b43c4f8fd9e800d6a")

	data := []*chainhash.Hash{EmptyTrieHash}
	root := computeMerkleRoot(data)
	r.True(EmptyTrieHash.IsEqual(root))

	data = append(data, NoChildrenHash, NoClaimsHash)
	root = computeMerkleRoot(data)
	r.True(target.IsEqual(root))
}
|
69
claimtrie/merkletrie/merkletrierepo/pebble.go
Normal file
69
claimtrie/merkletrie/merkletrierepo/pebble.go
Normal file
|
@ -0,0 +1,69 @@
|
|||
package merkletrierepo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/cockroachdb/pebble"
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
)
|
||||
|
||||
// Pebble is a MerkleTrie repo backed by a pebble database.
type Pebble struct {
	db *pebble.DB
}
|
||||
|
||||
// NewPebble opens (or creates) a pebble DB at path with a 512 MiB block cache
// and spawns a goroutine that prints cache statistics every 60 seconds.
//
// NOTE(review): the stats goroutine and its ticker are never stopped, so they
// outlive the repo — acceptable for a short-lived CLI, but confirm.
func NewPebble(path string) (*Pebble, error) {

	cache := pebble.NewCache(512 << 20)
	defer cache.Unref() // the DB retains its own reference

	go func() {
		tick := time.NewTicker(60 * time.Second)
		for range tick.C {

			m := cache.Metrics()
			fmt.Printf("cnt: %s, objs: %s, hits: %s, miss: %s, hitrate: %.2f\n",
				humanize.Bytes(uint64(m.Size)),
				humanize.Comma(m.Count),
				humanize.Comma(m.Hits),
				humanize.Comma(m.Misses),
				float64(m.Hits)/float64(m.Hits+m.Misses))

		}
	}()

	db, err := pebble.Open(path, &pebble.Options{Cache: cache, BytesPerSync: 32 << 20})
	if err != nil {
		return nil, fmt.Errorf("pebble open %s, %w", path, err)
	}

	repo := &Pebble{
		db: db,
	}

	return repo, nil
}
|
||||
|
||||
// Get returns the value stored at key plus a closer that must be called to
// release the returned buffer.
func (repo *Pebble) Get(key []byte) ([]byte, io.Closer, error) {
	return repo.db.Get(key)
}

// Set stores value under key without forcing a sync to disk (pebble.NoSync).
func (repo *Pebble) Set(key, value []byte) error {
	return repo.db.Set(key, value, pebble.NoSync)
}
|
||||
|
||||
func (repo *Pebble) Close() error {
|
||||
|
||||
err := repo.db.Flush()
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble fludh: %w", err)
|
||||
}
|
||||
|
||||
err = repo.db.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble close: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
12
claimtrie/merkletrie/repo.go
Normal file
12
claimtrie/merkletrie/repo.go
Normal file
|
@ -0,0 +1,12 @@
|
|||
package merkletrie
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// Repo defines APIs for MerkleTrie to access persistence layer.
type Repo interface {
	// Get returns the value stored at key, plus a closer that releases the
	// returned buffer.
	Get(key []byte) ([]byte, io.Closer, error)
	// Set stores value under key.
	Set(key, value []byte) error
	// Close releases the underlying storage.
	Close() error
}
|
44
claimtrie/merkletrie/vertex.go
Normal file
44
claimtrie/merkletrie/vertex.go
Normal file
|
@ -0,0 +1,44 @@
|
|||
package merkletrie
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
)
|
||||
|
||||
// vertex is one in-memory node of the 256-way trie.
type vertex struct {
	merkleHash *chainhash.Hash  // cached hash; nil means dirty/recompute
	claimsHash *chainhash.Hash  // cached hash of the claims at this name
	childLinks map[byte]*vertex // resolved children, keyed by next name byte
	hasValue   bool             // true when a name terminates at this vertex
}

// newVertex returns a vertex with no children and the given cached hash
// (which may be nil to mark it dirty).
func newVertex(hash *chainhash.Hash) *vertex {
	return &vertex{childLinks: map[byte]*vertex{}, merkleHash: hash}
}
|
||||
|
||||
// TODO: more professional to use msgpack here?

// nbuf decodes the on-disk format of a node, which has the following form:
// ch(1B) hash(32B)
// ...
// ch(1B) hash(32B)
// vhash(32B)
// The trailing vhash is present only when the node carries a value, which is
// detected by the total length not being a multiple of 33.
type nbuf []byte

// entries returns the number of (ch, hash) child records.
func (nb nbuf) entries() int {
	return len(nb) / 33
}

// entry returns the i-th child record's prefix byte and hash.
func (nb nbuf) entry(i int) (byte, *chainhash.Hash) {
	h := chainhash.Hash{}
	copy(h[:], nb[33*i+1:])
	return nb[33*i], &h
}

// hasValue reports whether a trailing value hash is present and returns it.
func (nb nbuf) hasValue() (bool, *chainhash.Hash) {
	if len(nb)%33 == 0 {
		return false, nil
	}
	h := chainhash.Hash{}
	copy(h[:], nb[len(nb)-32:])
	return true, &h
}
|
152
claimtrie/node/claim.go
Normal file
152
claimtrie/node/claim.go
Normal file
|
@ -0,0 +1,152 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/btcsuite/btcd/claimtrie/param"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/btcsuite/btcutil"
|
||||
)
|
||||
|
||||
// ClaimID represents a Claim's ClaimID.
type ClaimID [20]byte

// NewClaimID returns a Claim ID calculated from Ripemd160(Sha256(OUTPOINT)).
func NewClaimID(op wire.OutPoint) ClaimID {

	// Hash the txid bytes followed by the big-endian output index.
	w := bytes.NewBuffer(op.Hash[:])
	if err := binary.Write(w, binary.BigEndian, op.Index); err != nil {
		panic(err) // writes to a bytes.Buffer cannot fail
	}
	var id ClaimID
	copy(id[:], btcutil.Hash160(w.Bytes()))

	return id
}
|
||||
|
||||
// NewIDFromString returns a Claim ID from a string.
|
||||
func NewIDFromString(s string) (ClaimID, error) {
|
||||
|
||||
var id ClaimID
|
||||
_, err := hex.Decode(id[:], []byte(s))
|
||||
for i, j := 0, len(id)-1; i < j; i, j = i+1, j-1 {
|
||||
id[i], id[j] = id[j], id[i]
|
||||
}
|
||||
|
||||
return id, err
|
||||
}
|
||||
|
||||
// String returns the hex form of the ID, byte-reversed relative to memory.
// The value receiver gives the method a private copy, so the in-place
// reversal does not mutate the caller's ID.
func (id ClaimID) String() string {

	for i, j := 0, len(id)-1; i < j; i, j = i+1, j-1 {
		id[i], id[j] = id[j], id[i]
	}

	return hex.EncodeToString(id[:])
}
|
||||
|
||||
// Status is the activation state of a claim or support.
type Status int

const (
	Accepted  Status = iota // arrived, not yet counted
	Activated               // counted toward effective amounts (see EffectiveAmount)
	Deactivated
)
|
||||
|
||||
// Claim defines a structure of stake, which could be a Claim or Support.
type Claim struct {
	OutPoint   wire.OutPoint
	ClaimID    string // hex, byte-reversed form (see ClaimID.String)
	Amount     int64
	AcceptedAt int32 // when arrived (aka, originally landed in block)
	ActiveAt   int32 // AcceptedAt + actual delay
	Status     Status
	Value      []byte
	VisibleAt  int32
}
|
||||
|
||||
// setOutPoint sets the claim's outpoint and returns c for chaining.
func (c *Claim) setOutPoint(op wire.OutPoint) *Claim {
	c.OutPoint = op
	return c
}

// SetAmt sets the claim's amount and returns c for chaining.
func (c *Claim) SetAmt(amt int64) *Claim {
	c.Amount = amt
	return c
}

// setAccepted records the height the claim arrived at and returns c.
func (c *Claim) setAccepted(height int32) *Claim {
	c.AcceptedAt = height
	return c
}

// setActiveAt records the height the claim becomes active and returns c.
func (c *Claim) setActiveAt(height int32) *Claim {
	c.ActiveAt = height
	return c
}

// SetValue sets the claim's value payload and returns c for chaining.
func (c *Claim) SetValue(value []byte) *Claim {
	c.Value = value
	return c
}

// setStatus sets the claim's status and returns c for chaining.
func (c *Claim) setStatus(status Status) *Claim {
	c.Status = status
	return c
}
|
||||
|
||||
func (c *Claim) EffectiveAmount(supports ClaimList) int64 {
|
||||
|
||||
if c.Status != Activated {
|
||||
return 0
|
||||
}
|
||||
|
||||
amt := c.Amount
|
||||
|
||||
for _, s := range supports {
|
||||
if s.Status == Activated && s.ClaimID == c.ClaimID { // TODO: this comparison is hit a lot; byte comparison instead of hex would be faster
|
||||
amt += s.Amount
|
||||
}
|
||||
}
|
||||
|
||||
return amt
|
||||
}
|
||||
|
||||
func (c *Claim) ExpireAt() int32 {
|
||||
|
||||
if c.AcceptedAt+param.OriginalClaimExpirationTime > param.ExtendedClaimExpirationForkHeight {
|
||||
return c.AcceptedAt + param.ExtendedClaimExpirationTime
|
||||
}
|
||||
|
||||
return c.AcceptedAt + param.OriginalClaimExpirationTime
|
||||
}
|
||||
|
||||
func OutPointLess(a, b wire.OutPoint) bool {
|
||||
|
||||
switch cmp := bytes.Compare(a.Hash[:], b.Hash[:]); {
|
||||
case cmp < 0:
|
||||
return true
|
||||
case cmp > 0:
|
||||
return false
|
||||
default:
|
||||
return a.Index < b.Index
|
||||
}
|
||||
}
|
||||
|
||||
func NewOutPointFromString(str string) *wire.OutPoint {
|
||||
|
||||
f := strings.Split(str, ":")
|
||||
if len(f) != 2 {
|
||||
return nil
|
||||
}
|
||||
hash, _ := chainhash.NewHashFromStr(f[0])
|
||||
idx, _ := strconv.Atoi(f[1])
|
||||
|
||||
return wire.NewOutPoint(hash, uint32(idx))
|
||||
}
|
30
claimtrie/node/claim_list.go
Normal file
30
claimtrie/node/claim_list.go
Normal file
|
@ -0,0 +1,30 @@
|
|||
package node
|
||||
|
||||
import "github.com/btcsuite/btcd/wire"
|
||||
|
||||
// ClaimList is a list of stakes (claims or supports) held by a node.
type ClaimList []*Claim

// comparator is a predicate used to search a ClaimList.
type comparator func(c *Claim) bool
|
||||
|
||||
func byID(id string) comparator {
|
||||
return func(c *Claim) bool {
|
||||
return c.ClaimID == id
|
||||
}
|
||||
}
|
||||
|
||||
func byOut(out wire.OutPoint) comparator {
|
||||
return func(c *Claim) bool {
|
||||
return c.OutPoint == out // assuming value comparison
|
||||
}
|
||||
}
|
||||
|
||||
func (l ClaimList) find(cmp comparator) *Claim {
|
||||
|
||||
for i := range l {
|
||||
if cmp(l[i]) {
|
||||
return l[i]
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
347
claimtrie/node/manager.go
Normal file
347
claimtrie/node/manager.go
Normal file
|
@ -0,0 +1,347 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/claimtrie/change"
|
||||
"github.com/btcsuite/btcd/claimtrie/param"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
)
|
||||
|
||||
// Manager maintains claim nodes as block-level changes are applied and undone.
type Manager interface {
	// AppendChange buffers a change to be applied at the next height increment.
	AppendChange(chg change.Change) error
	// IncrementHeightTo persists buffered changes and advances to height,
	// returning the names that were touched (for later rollback).
	IncrementHeightTo(height int32) ([][]byte, error)
	// DecrementHeightTo rolls the given names back to height.
	DecrementHeightTo(affectedNames [][]byte, height int32) error
	// Height returns the height of the most recently processed block.
	Height() int32
	// Close releases the underlying repo.
	Close() error
	// Node returns the node for name at the current height; nil if absent.
	Node(name []byte) (*Node, error)
	// NextUpdateHeightOfNode returns name and the next height at which its
	// node needs to be refreshed.
	NextUpdateHeightOfNode(name []byte) ([]byte, int32)
	// IterateNames invokes predicate for each name known to the repo.
	IterateNames(predicate func(name []byte) bool)
	// ClaimHashes returns merkle leaf hashes for name's activated claims.
	ClaimHashes(name []byte) []*chainhash.Hash
	// Hash returns the merkle leaf hash of name's winning claim, or nil.
	Hash(name []byte) *chainhash.Hash
}
|
||||
|
||||
// BaseManager is the default Manager implementation: an in-memory node cache
// plus a per-block change buffer layered over a persistent Repo.
type BaseManager struct {
	repo Repo // persistent store of per-name change history

	height  int32            // height of the most recently processed block
	cache   map[string]*Node // nodes materialized at `height`, keyed by name
	changes []change.Change  // changes buffered since the last height increment
}
|
||||
|
||||
func NewBaseManager(repo Repo) (Manager, error) {
|
||||
|
||||
nm := &BaseManager{
|
||||
repo: repo,
|
||||
cache: map[string]*Node{},
|
||||
}
|
||||
|
||||
return nm, nil
|
||||
}
|
||||
|
||||
// Node returns a node at the current height.
// The returned node may have pending changes.
// Results are cached by name; a nil node (with nil error) means the name is
// nonexistent or fully expired at the current height.
func (nm *BaseManager) Node(name []byte) (*Node, error) {

	nameStr := string(name)
	n, ok := nm.cache[nameStr]
	if ok && n != nil {
		// Cache hit: fast-forward the cached node to the current height.
		// maxHeight of -1 means "no stepping past height" — see AdjustTo.
		return n.AdjustTo(nm.height, -1, name), nil
	}

	changes, err := nm.repo.LoadChanges(name)
	if err != nil {
		return nil, fmt.Errorf("load changes from node repo: %w", err)
	}

	// Replay the full change history up to the current height.
	n, err = nm.newNodeFromChanges(changes, nm.height)
	if err != nil {
		return nil, fmt.Errorf("create node from changes: %w", err)
	}

	if n == nil { // they've requested a nonexistent or expired name
		return nil, nil
	}

	nm.cache[nameStr] = n
	return n, nil
}
|
||||
|
||||
// newNodeFromChanges returns a new Node constructed from the changes.
// The changes must preserve their order received.
// Changes above the requested height are ignored; a nil node (with nil
// error) is returned when no change applies at or below the height.
func (nm *BaseManager) newNodeFromChanges(changes []change.Change, height int32) (*Node, error) {

	if len(changes) == 0 {
		return nil, nil
	}

	n := New()
	previous := changes[0].Height
	count := len(changes)

	for i, chg := range changes {
		if chg.Height < previous {
			return nil, fmt.Errorf("expected the changes to be in order by height")
		}
		if chg.Height > height {
			// Everything from index i on is in the future; stop replaying.
			count = i
			break
		}
		if previous < chg.Height {
			// Crossing into a new block: settle activations/takeovers for
			// the gap before applying the next change.
			n.AdjustTo(previous, chg.Height-1, chg.Name) // update bids and activation
			previous = chg.Height
		}

		delay := nm.getDelayForName(n, chg)
		err := n.ApplyChange(chg, delay)
		if err != nil {
			return nil, fmt.Errorf("append change: %w", err)
		}
	}

	if count <= 0 {
		return nil, nil
	}
	// Fast-forward from the last applied change up to the requested height.
	lastChange := changes[count-1]
	return n.AdjustTo(lastChange.Height, height, lastChange.Name), nil
}
|
||||
|
||||
// AppendChange buffers a change for the current block and invalidates any
// cached node for its name. Nothing is persisted until IncrementHeightTo
// flushes the buffer. It currently never returns a non-nil error.
func (nm *BaseManager) AppendChange(chg change.Change) error {

	if len(nm.changes) <= 0 {
		// this little code block is acting as a "block complete" method
		// that could be called after the merkle hash is complete
		if len(nm.cache) > param.MaxNodeManagerCacheSize {
			// TODO: use a better cache model?
			fmt.Printf("Clearing manager cache at height %d\n", nm.height)
			nm.cache = map[string]*Node{}
		}
	}

	// The cached node (if any) no longer reflects this pending change.
	delete(nm.cache, string(chg.Name))
	nm.changes = append(nm.changes, chg)

	return nil
}
|
||||
|
||||
func (nm *BaseManager) IncrementHeightTo(height int32) ([][]byte, error) {
|
||||
|
||||
if height <= nm.height {
|
||||
panic("invalid height")
|
||||
}
|
||||
|
||||
names := make([][]byte, 0, len(nm.changes))
|
||||
for i := range nm.changes {
|
||||
names = append(names, nm.changes[i].Name)
|
||||
}
|
||||
|
||||
if err := nm.repo.AppendChanges(nm.changes); err != nil {
|
||||
return nil, fmt.Errorf("save changes to node repo: %w", err)
|
||||
}
|
||||
|
||||
// Truncate the buffer size to zero.
|
||||
if len(nm.changes) > 1000 { // TODO: determine a good number here
|
||||
nm.changes = nil // release the RAM
|
||||
} else {
|
||||
nm.changes = nm.changes[:0]
|
||||
}
|
||||
nm.height = height
|
||||
|
||||
return names, nil
|
||||
}
|
||||
|
||||
// DecrementHeightTo rolls the manager back to height (which must be below
// the current height), dropping each affected name's persisted changes above
// that height and evicting those names from the cache. affectedNames must
// include every name touched above the target height.
func (nm *BaseManager) DecrementHeightTo(affectedNames [][]byte, height int32) error {
	if height >= nm.height {
		return fmt.Errorf("invalid height")
	}

	for _, name := range affectedNames {
		delete(nm.cache, string(name))
		if err := nm.repo.DropChanges(name, height); err != nil {
			return err
		}
	}

	nm.height = height

	return nil
}
|
||||
|
||||
// getDelayForName computes the activation delay (in blocks) for chg when
// applied to n. The current winner extending its own claim gets no delay;
// an explicit ActiveHeight on the change overrides the computed delay; and
// certain historical heights force a zero delay to replicate old-code bugs.
func (nm *BaseManager) getDelayForName(n *Node, chg change.Change) int32 {
	hasBest := n.BestClaim != nil // && n.BestClaim.Status == Activated
	if hasBest && n.BestClaim.ClaimID == chg.ClaimID {
		return 0 // the winner updating its own claim activates immediately
	}
	if chg.ActiveHeight >= chg.Height { // ActiveHeight is usually unset (aka, zero)
		return chg.ActiveHeight - chg.Height
	}
	if !hasBest {
		return 0 // uncontested name: nothing to take over, no delay
	}

	needsWorkaround := nm.decideIfWorkaroundNeeded(n, chg)

	delay := calculateDelay(chg.Height, n.TakenOverAt)
	if delay > 0 && needsWorkaround {
		// TODO: log this (but only once per name-height combo)
		//fmt.Printf("Delay workaround applies to %s at %d\n", chg.Name, chg.Height)
		return 0
	}
	return delay
}
|
||||
|
||||
// decideIfWorkaroundNeeded handles bugs that existed in previous versions.
// It reports whether the zero-delay workaround must be applied for chg,
// consulting hard-coded height lists (param.DelayWorkarounds*) below the
// cutoff and a structural children check above it. The exact branch order
// is consensus-relevant; do not reorder.
func (nm *BaseManager) decideIfWorkaroundNeeded(n *Node, chg change.Change) bool {

	if chg.Height >= param.MaxRemovalWorkaroundHeight {
		// TODO: hard fork this out; it's a bug from previous versions:

		if chg.Height <= 933294 {
			// In this height window the hits are known and listed explicitly.
			heights, ok := param.DelayWorkaroundsPart2[string(chg.Name)]
			if ok {
				for _, h := range heights {
					if h == chg.Height {
						//hc := nm.hasChildrenButNoSelf(chg.Name, chg.Height, 2)
						hc := true
						fmt.Printf("HC: %s: %t\n", chg.Name, hc)
						return true
					}
				}
			}
		} else {
			// Known hits:
			if nm.hasChildrenButNoSelf(chg.Name, chg.Height, 2) {
				return true
			}
		}
	} else if len(n.Claims) > 0 {
		// NOTE: old code had a bug in it where nodes with no claims but with children would get left in the cache after removal.
		// This would cause the getNumBlocksOfContinuousOwnership to return zero (causing incorrect takeover height calc).
		w, ok := param.DelayWorkarounds[string(chg.Name)]
		if ok {
			for _, h := range w {
				if chg.Height == h {
					return true
				}
			}
		}
	}
	return false
}
|
||||
|
||||
func calculateDelay(curr, tookOver int32) int32 {
|
||||
|
||||
delay := (curr - tookOver) / param.ActiveDelayFactor
|
||||
if delay > param.MaxActiveDelay {
|
||||
return param.MaxActiveDelay
|
||||
}
|
||||
|
||||
return delay
|
||||
}
|
||||
|
||||
func (nm BaseManager) NextUpdateHeightOfNode(name []byte) ([]byte, int32) {
|
||||
|
||||
n, err := nm.Node(name)
|
||||
if err != nil || n == nil {
|
||||
return name, 0
|
||||
}
|
||||
|
||||
return name, n.NextUpdate()
|
||||
}
|
||||
|
||||
// Height returns the height of the most recently processed block.
func (nm *BaseManager) Height() int32 {
	return nm.height
}
|
||||
|
||||
func (nm *BaseManager) Close() error {
|
||||
|
||||
err := nm.repo.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("close repo: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// hasChildrenButNoSelf reports whether name has at least `required` distinct
// child branches (distinct next bytes after the prefix) whose winning claim
// is activated at the given height, and name itself is not among the hits.
// Encountering name itself short-circuits to false.
func (nm *BaseManager) hasChildrenButNoSelf(name []byte, height int32, required int) bool {
	c := map[byte]bool{} // set of distinct bytes that follow the prefix

	nm.repo.IterateChildren(name, func(changes []change.Change) bool {
		// if the key is unseen, generate a node for it to height
		// if that node is active then increase the count
		if len(changes) == 0 {
			return true
		}
		n, _ := nm.newNodeFromChanges(changes, height)
		if n != nil && n.BestClaim != nil && n.BestClaim.Status == Activated {
			if len(name) >= len(changes[0].Name) {
				return false // hit self
			}
			c[changes[0].Name[len(name)]] = true
			if len(c) >= required {
				return false // found enough children; stop iterating
			}
		}
		return true
	})
	return len(c) >= required
}
|
||||
|
||||
// IterateNames invokes predicate for each name in the repo; iteration
// control is delegated to the repo's IterateAll implementation
// (presumably it stops when predicate returns false — see Repo).
func (nm *BaseManager) IterateNames(predicate func(name []byte) bool) {
	nm.repo.IterateAll(predicate)
}
|
||||
|
||||
// ClaimHashes returns the merkle leaf hashes of name's activated claims in
// canonical (SortClaims) order. It returns nil when the name is missing or
// cannot be loaded. Note: sorting mutates the cached node's claim order.
func (nm *BaseManager) ClaimHashes(name []byte) []*chainhash.Hash {

	n, err := nm.Node(name)
	if err != nil || n == nil {
		return nil
	}
	n.SortClaims()
	claimHashes := make([]*chainhash.Hash, 0, len(n.Claims))
	for _, c := range n.Claims {
		if c.Status == Activated { // TODO: unit test this line
			claimHashes = append(claimHashes, calculateNodeHash(c.OutPoint, n.TakenOverAt))
		}
	}
	return claimHashes
}
|
||||
|
||||
func (nm *BaseManager) Hash(name []byte) *chainhash.Hash {
|
||||
|
||||
n, err := nm.Node(name)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if n != nil && len(n.Claims) > 0 {
|
||||
if n.BestClaim != nil && n.BestClaim.Status == Activated {
|
||||
return calculateNodeHash(n.BestClaim.OutPoint, n.TakenOverAt)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// calculateNodeHash computes the merkle leaf hash for a winning claim:
// the double-SHA256 of the concatenation of three double-SHA256 digests —
// the outpoint's transaction hash, the output index rendered as decimal
// ASCII, and the takeover height as an 8-byte big-endian integer.
// The exact byte layout is consensus-relevant; do not change it.
func calculateNodeHash(op wire.OutPoint, takeover int32) *chainhash.Hash {

	txHash := chainhash.DoubleHashH(op.Hash[:])

	nOut := []byte(strconv.Itoa(int(op.Index)))
	nOutHash := chainhash.DoubleHashH(nOut)

	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(takeover))
	heightHash := chainhash.DoubleHashH(buf)

	h := make([]byte, 0, sha256.Size*3)
	h = append(h, txHash[:]...)
	h = append(h, nOutHash[:]...)
	h = append(h, heightHash[:]...)

	hh := chainhash.DoubleHashH(h)

	return &hh
}
|
133
claimtrie/node/manager_test.go
Normal file
133
claimtrie/node/manager_test.go
Normal file
|
@ -0,0 +1,133 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/claimtrie/change"
|
||||
"github.com/btcsuite/btcd/claimtrie/node/noderepo"
|
||||
"github.com/btcsuite/btcd/claimtrie/param"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
	// out1 and out2 share a txid and differ only in output index;
	// out3 uses a different txid. Together they exercise outpoint ordering.
	out1  = NewOutPointFromString("0000000000000000000000000000000000000000000000000000000000000000:1")
	out2  = NewOutPointFromString("0000000000000000000000000000000000000000000000000000000000000000:2")
	out3  = NewOutPointFromString("0100000000000000000000000000000000000000000000000000000000000000:1")
	name1 = []byte("name1")
	name2 = []byte("name2")
)
|
||||
|
||||
// verify that we can round-trip bytes to strings
|
||||
func TestStringRoundTrip(t *testing.T) {
|
||||
|
||||
r := require.New(t)
|
||||
|
||||
data := [][]byte{
|
||||
{97, 98, 99, 0, 100, 255},
|
||||
{0xc3, 0x28},
|
||||
{0xa0, 0xa1},
|
||||
{0xe2, 0x28, 0xa1},
|
||||
{0xf0, 0x28, 0x8c, 0x28},
|
||||
}
|
||||
for _, d := range data {
|
||||
s := string(d)
|
||||
r.Equal(s, fmt.Sprintf("%s", d))
|
||||
d2 := []byte(s)
|
||||
r.Equal(len(d), len(s))
|
||||
r.Equal(d, d2)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSimpleAddClaim adds a claim for each of two names at successive
// heights, verifies both nodes resolve, then rolls back in two steps and
// verifies each rollback makes the corresponding node disappear.
func TestSimpleAddClaim(t *testing.T) {

	r := require.New(t)

	param.SetNetwork(wire.TestNet, "")
	repo, err := noderepo.NewPebble(t.TempDir())
	r.NoError(err)

	m, err := NewBaseManager(repo)
	r.NoError(err)

	_, err = m.IncrementHeightTo(10)
	r.NoError(err)

	// name1 claimed at height 11.
	chg := change.New(change.AddClaim).SetName(name1).SetOutPoint(out1.String()).SetHeight(11)
	err = m.AppendChange(chg)
	r.NoError(err)
	_, err = m.IncrementHeightTo(11)
	r.NoError(err)

	// name2 claimed at height 12 (reusing the change value with new fields).
	chg = chg.SetName(name2).SetOutPoint(out2.String()).SetHeight(12)
	err = m.AppendChange(chg)
	r.NoError(err)
	_, err = m.IncrementHeightTo(12)
	r.NoError(err)

	n1, err := m.Node(name1)
	r.NoError(err)
	r.Equal(1, len(n1.Claims))
	r.NotNil(n1.Claims.find(byOut(*out1)))

	n2, err := m.Node(name2)
	r.NoError(err)
	r.Equal(1, len(n2.Claims))
	r.NotNil(n2.Claims.find(byOut(*out2)))

	// Roll back below name2's claim height: name2 should vanish.
	err = m.DecrementHeightTo([][]byte{name2}, 11)
	r.NoError(err)
	n2, err = m.Node(name2)
	r.NoError(err)
	r.Nil(n2)

	// Roll back below name1's claim height: name1 should vanish too.
	err = m.DecrementHeightTo([][]byte{name1}, 1)
	r.NoError(err)
	n2, err = m.Node(name1)
	r.NoError(err)
	r.Nil(n2)
}
|
||||
|
||||
// TestNodeSort verifies the winner among equal-amount claims: with identical
// amounts and acceptance heights, the smaller outpoint (OutPointLess) wins,
// and adding another equal claim does not displace the existing winner.
func TestNodeSort(t *testing.T) {

	r := require.New(t)

	// Keep the claims from expiring during the test.
	param.ExtendedClaimExpirationTime = 1000

	r.True(OutPointLess(*out1, *out2))
	r.True(OutPointLess(*out1, *out3))

	n := New()
	n.Claims = append(n.Claims, &Claim{OutPoint: *out1, AcceptedAt: 3, Amount: 3, ClaimID: "a"})
	n.Claims = append(n.Claims, &Claim{OutPoint: *out2, AcceptedAt: 3, Amount: 3, ClaimID: "b"})
	n.handleExpiredAndActivated(3)
	n.updateTakeoverHeight(3, []byte{}, true)

	r.Equal(n.Claims.find(byOut(*out1)).OutPoint.String(), n.BestClaim.OutPoint.String())

	n.Claims = append(n.Claims, &Claim{OutPoint: *out3, AcceptedAt: 3, Amount: 3, ClaimID: "c"})
	n.handleExpiredAndActivated(3)
	n.updateTakeoverHeight(3, []byte{}, true)
	r.Equal(n.Claims.find(byOut(*out1)).OutPoint.String(), n.BestClaim.OutPoint.String())
}
|
||||
|
||||
// TestClaimSort verifies SortClaims orders by descending amount, with the
// earlier-accepted claim breaking the tie between the two amount-2 claims.
func TestClaimSort(t *testing.T) {

	r := require.New(t)

	// Keep the claims from expiring during the test.
	param.ExtendedClaimExpirationTime = 1000

	n := New()
	n.Claims = append(n.Claims, &Claim{OutPoint: *out2, AcceptedAt: 3, Amount: 3, ClaimID: "b"})
	n.Claims = append(n.Claims, &Claim{OutPoint: *out3, AcceptedAt: 3, Amount: 2, ClaimID: "c"})
	n.Claims = append(n.Claims, &Claim{OutPoint: *out3, AcceptedAt: 4, Amount: 2, ClaimID: "d"})
	n.Claims = append(n.Claims, &Claim{OutPoint: *out1, AcceptedAt: 3, Amount: 4, ClaimID: "a"})
	n.SortClaims()

	r.Equal(int64(4), n.Claims[0].Amount)
	r.Equal(int64(3), n.Claims[1].Amount)
	r.Equal(int64(2), n.Claims[2].Amount)
	r.Equal(int32(4), n.Claims[3].AcceptedAt)
}
|
300
claimtrie/node/node.go
Normal file
300
claimtrie/node/node.go
Normal file
|
@ -0,0 +1,300 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
"github.com/btcsuite/btcd/claimtrie/change"
|
||||
"github.com/btcsuite/btcd/claimtrie/param"
|
||||
)
|
||||
|
||||
// mispents records spend-without-matching-claim events already reported,
// keyed by "height_claimID", so each is logged only once (see ApplyChange).
// NOTE(review): package-level mutable state; not safe for concurrent use.
var mispents = map[string]bool{}
|
||||
|
||||
// Node holds the complete bidding state for one name.
type Node struct {
	BestClaim   *Claim    // The claim that has most effective amount at the current height.
	TakenOverAt int32     // The height at when the current BestClaim took over.
	Claims      ClaimList // List of all Claims.
	Supports    ClaimList // List of all Supports, including orphaned ones.
}
|
||||
|
||||
// New returns a new node.
|
||||
func New() *Node {
|
||||
return &Node{}
|
||||
}
|
||||
|
||||
// ApplyChange mutates the node according to a single change record; delay is
// the activation delay (in blocks) already computed for the change.
// Unexpected states are logged rather than returned; the error is always nil
// today.
func (n *Node) ApplyChange(chg change.Change, delay int32) error {

	// NOTE(review): out is dereferenced unconditionally below; a malformed
	// chg.OutPoint (nil result) would panic here.
	out := NewOutPointFromString(chg.OutPoint)

	visibleAt := chg.VisibleHeight
	if visibleAt <= 0 {
		visibleAt = chg.Height
	}

	switch chg.Type {
	case change.AddClaim:
		c := &Claim{
			OutPoint:   *out,
			Amount:     chg.Amount,
			ClaimID:    chg.ClaimID,
			AcceptedAt: chg.Height, // not tracking original height in this version (but we could)
			ActiveAt:   chg.Height + delay,
			Value:      chg.Value,
			VisibleAt:  visibleAt,
		}
		old := n.Claims.find(byOut(*out)) // TODO: remove this after proving ResetHeight works
		if old != nil {
			fmt.Printf("CONFLICT WITH EXISTING TXO! Name: %s, Height: %d\n", chg.Name, chg.Height)
		}
		n.Claims = append(n.Claims, c)

	case change.SpendClaim:
		// Deactivate rather than remove; handleExpiredAndActivated prunes
		// deactivated stakes, and UpdateClaim below relies on finding the
		// deactivated claim to preserve its ID.
		c := n.Claims.find(byOut(*out))
		if c != nil {
			c.setStatus(Deactivated)
		} else if !mispents[fmt.Sprintf("%d_%s", chg.Height, chg.ClaimID)] {
			mispents[fmt.Sprintf("%d_%s", chg.Height, chg.ClaimID)] = true
			fmt.Printf("Spending claim but missing existing claim with TXO %s\n "+
				"Name: %s, ID: %s\n", chg.OutPoint, chg.Name, chg.ClaimID)
		}
		// apparently it's legit to be absent in the map:
		// 'two' at 481100, 36a719a156a1df178531f3c712b8b37f8e7cc3b36eea532df961229d936272a1:0

	case change.UpdateClaim:
		// Find and remove the claim, which has just been spent.
		c := n.Claims.find(byID(chg.ClaimID))
		if c != nil && c.Status == Deactivated {

			// Keep its ID, which was generated from the spent claim.
			// And update the rest of properties.
			c.setOutPoint(*out).SetAmt(chg.Amount).SetValue(chg.Value)
			c.setStatus(Accepted) // it was Deactivated in the spend

			// It's a bug, but the old code would update these.
			// That forces this to be newer, which may in an unintentional takeover if there's an older one.
			c.setAccepted(chg.Height)         // TODO: Fork this out
			c.setActiveAt(chg.Height + delay) // TODO: Fork this out

		} else {
			fmt.Printf("Updating claim but missing existing claim with ID %s", chg.ClaimID)
		}
	case change.AddSupport:
		n.Supports = append(n.Supports, &Claim{
			OutPoint:   *out,
			Amount:     chg.Amount,
			ClaimID:    chg.ClaimID,
			AcceptedAt: chg.Height,
			Value:      chg.Value,
			ActiveAt:   chg.Height + delay,
			VisibleAt:  visibleAt,
		})

	case change.SpendSupport:
		s := n.Supports.find(byOut(*out))
		if s != nil {
			s.setStatus(Deactivated)
		} else {
			fmt.Printf("Spending support but missing existing support with TXO %s\n "+
				"Name: %s, ID: %s\n", chg.OutPoint, chg.Name, chg.ClaimID)
		}
	}
	return nil
}
|
||||
|
||||
// AdjustTo activates claims and computes takeovers until it reaches the specified height.
// When maxHeight > height it then steps through every scheduled update
// height (NextUpdate) up to maxHeight; otherwise it settles only at height.
// It returns the node to allow chaining.
func (n *Node) AdjustTo(height, maxHeight int32, name []byte) *Node {
	changed := n.handleExpiredAndActivated(height) > 0
	n.updateTakeoverHeight(height, name, changed)
	if maxHeight > height {
		for h := n.NextUpdate(); h <= maxHeight; h = n.NextUpdate() {
			changed = n.handleExpiredAndActivated(h) > 0
			n.updateTakeoverHeight(h, name, changed)
			height = h
		}
	}
	return n
}
|
||||
|
||||
// updateTakeoverHeight recomputes the winning claim at the given height and,
// when the winner changes, records the takeover height. refindBest forces a
// full rescan of the claims for the best candidate.
func (n *Node) updateTakeoverHeight(height int32, name []byte, refindBest bool) {

	candidate := n.BestClaim
	if refindBest {
		candidate = n.findBestClaim() // so expensive...
	}

	hasCandidate := candidate != nil
	hasCurrentWinner := n.BestClaim != nil && n.BestClaim.Status == Activated

	takeoverHappening := !hasCandidate || !hasCurrentWinner || candidate.ClaimID != n.BestClaim.ClaimID

	if takeoverHappening {
		// A takeover activates all pending stakes, which may itself change
		// who the best candidate is — rescan if anything activated.
		if n.activateAllClaims(height) > 0 {
			candidate = n.findBestClaim()
		}
	}

	if !takeoverHappening && height < param.MaxRemovalWorkaroundHeight {
		// This is a super ugly hack to work around bug in old code.
		// The bug: un/support a name then update it. This will cause its takeover height to be reset to current.
		// This is because the old code would add to the cache without setting block originals when dealing in supports.
		_, takeoverHappening = param.TakeoverWorkarounds[fmt.Sprintf("%d_%s", height, name)] // TODO: ditch the fmt call
	}

	if takeoverHappening {
		n.TakenOverAt = height
		n.BestClaim = candidate
	}
}
|
||||
|
||||
// handleExpiredAndActivated activates accepted stakes whose activation and
// visibility heights have arrived, and removes expired or deactivated ones,
// for both claims and supports. It returns the number of state changes made.
func (n *Node) handleExpiredAndActivated(height int32) int {

	changes := 0
	update := func(items ClaimList) ClaimList {
		for i := 0; i < len(items); i++ {
			c := items[i]
			if c.Status == Accepted && c.ActiveAt <= height && c.VisibleAt <= height {
				c.setStatus(Activated)
				changes++
			}
			if c.ExpireAt() <= height || c.Status == Deactivated {
				// Unordered removal: swap the last element into slot i,
				// shrink the list, and step back so the swapped-in element
				// is examined on the next iteration.
				if i < len(items)-1 {
					items[i] = items[len(items)-1]
					i--
				}
				items = items[:len(items)-1]
				changes++
			}
		}
		return items
	}
	n.Claims = update(n.Claims)
	n.Supports = update(n.Supports)
	return changes
}
|
||||
|
||||
// NextUpdate returns the nearest height in the future that the node should
|
||||
// be refreshed due to changes of claims or supports.
|
||||
func (n Node) NextUpdate() int32 {
|
||||
|
||||
next := int32(math.MaxInt32)
|
||||
|
||||
for _, c := range n.Claims {
|
||||
if c.ExpireAt() < next {
|
||||
next = c.ExpireAt()
|
||||
}
|
||||
// if we're not active, we need to go to activeAt unless we're still invisible there
|
||||
if c.Status == Accepted {
|
||||
min := c.ActiveAt
|
||||
if c.VisibleAt > min {
|
||||
min = c.VisibleAt
|
||||
}
|
||||
if min < next {
|
||||
next = min
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, s := range n.Supports {
|
||||
if s.ExpireAt() < next {
|
||||
next = s.ExpireAt()
|
||||
}
|
||||
if s.Status == Accepted {
|
||||
min := s.ActiveAt
|
||||
if s.VisibleAt > min {
|
||||
min = s.VisibleAt
|
||||
}
|
||||
if min < next {
|
||||
next = min
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return next
|
||||
}
|
||||
|
||||
// findBestClaim returns the activated claim with the highest effective
// amount, breaking ties by earliest acceptance height and then by smallest
// outpoint. It returns nil when no claim is activated.
func (n Node) findBestClaim() *Claim {

	// WARNING: this method is called billions of times.
	// if we just had some easy way to know that our best claim was the first one in the list...
	// or it may be faster to cache effective amount in the db at some point.

	var best *Claim
	var bestAmount int64
	for _, candidate := range n.Claims {

		// not using switch here for performance reasons
		if candidate.Status != Activated {
			continue
		}

		if best == nil {
			best = candidate
			continue
		}

		candidateAmount := candidate.EffectiveAmount(n.Supports)
		if bestAmount <= 0 { // trying to reduce calls to EffectiveAmount
			// NOTE(review): assumes effective amounts are positive; a zero
			// amount would recompute here each iteration (correct but slow).
			bestAmount = best.EffectiveAmount(n.Supports)
		}

		switch {
		case candidateAmount > bestAmount:
			best = candidate
			bestAmount = candidateAmount
		case candidateAmount < bestAmount:
			continue
		case candidate.AcceptedAt < best.AcceptedAt:
			best = candidate
			bestAmount = candidateAmount
		case candidate.AcceptedAt > best.AcceptedAt:
			continue
		case OutPointLess(candidate.OutPoint, best.OutPoint):
			best = candidate
			bestAmount = candidateAmount
		}
	}

	return best
}
|
||||
|
||||
// activateAllClaims force-activates every accepted, already-visible stake
// whose scheduled activation is still in the future — used when a takeover
// occurs, since a takeover cancels remaining delays. It returns the number
// of stakes activated.
func (n *Node) activateAllClaims(height int32) int {
	count := 0
	for _, c := range n.Claims {
		if c.Status == Accepted && c.ActiveAt > height && c.VisibleAt <= height {
			c.setActiveAt(height) // don't necessary need to change this number
			c.setStatus(Activated)
			count++
		}
	}

	for _, s := range n.Supports {
		if s.Status == Accepted && s.ActiveAt > height && s.VisibleAt <= height {
			s.setActiveAt(height) // don't necessary need to change this number
			s.setStatus(Activated)
			count++
		}
	}
	return count
}
|
||||
|
||||
// SortClaims sorts n.Claims in descending order of effective amount,
// breaking ties by earlier acceptance height, then by smaller outpoint.
func (n *Node) SortClaims() {

	// purposefully sorting by descent: the comparator's parameters are
	// deliberately declared (j, i) — swapped — so the ascending comparisons
	// below yield a descending order overall.
	sort.Slice(n.Claims, func(j, i int) bool {
		iAmount := n.Claims[i].EffectiveAmount(n.Supports)
		jAmount := n.Claims[j].EffectiveAmount(n.Supports)
		switch {
		case iAmount < jAmount:
			return true
		case iAmount > jAmount:
			return false
		case n.Claims[i].AcceptedAt > n.Claims[j].AcceptedAt:
			return true
		case n.Claims[i].AcceptedAt < n.Claims[j].AcceptedAt:
			return false
		}
		return OutPointLess(n.Claims[j].OutPoint, n.Claims[i].OutPoint)
	})
}
|
188
claimtrie/node/noderepo/noderepo_test.go
Normal file
188
claimtrie/node/noderepo/noderepo_test.go
Normal file
|
@ -0,0 +1,188 @@
|
|||
package noderepo
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/claimtrie/change"
|
||||
"github.com/btcsuite/btcd/claimtrie/node"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
	// opStr1 is a well-formed outpoint string shared by all test changes.
	opStr1 = "0000000000000000000000000000000000000000000000000000000000000000:1"
	// testNodeName1 is the single name exercised by the repo contract tests.
	testNodeName1 = []byte("name1")
)
|
||||
|
||||
// TestPebble runs the generic node.Repo contract tests against the pebble
// implementation, deleting the test name's key range between cases.
func TestPebble(t *testing.T) {

	r := require.New(t)

	repo, err := NewPebble(t.TempDir())
	r.NoError(err)
	defer func() {
		err := repo.Close()
		r.NoError(err)
	}()

	cleanup := func() {
		// Remove exactly the keys for testNodeName1: the upper bound is the
		// name plus a zero byte, the smallest strictly-greater key.
		lowerBound := testNodeName1
		upperBound := append(testNodeName1, byte(0))
		err := repo.db.DeleteRange(lowerBound, upperBound, nil)
		r.NoError(err)
	}

	testNodeRepo(t, repo, func() {}, cleanup)
}
|
||||
|
||||
// testNodeRepo is the implementation-agnostic contract test for node.Repo:
// changes appended for a name must be returned by LoadChanges in order,
// whether written in one batch or several. Each case runs setup first and
// cleanup after; expected is compared as a prefix of what was loaded.
func testNodeRepo(t *testing.T, repo node.Repo, setup, cleanup func()) {

	r := require.New(t)

	chg := change.New(change.AddClaim).SetName(testNodeName1).SetOutPoint(opStr1)

	// Single-batch cases: heights 1, 3, 5 stored; `height` documents the
	// intent of each case, and `expected` is checked as a loaded prefix.
	testcases := []struct {
		name     string
		height   int32
		changes  []change.Change
		expected []change.Change
	}{
		{
			"test 1",
			1,
			[]change.Change{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
			[]change.Change{chg.SetHeight(1)},
		},
		{
			"test 2",
			2,
			[]change.Change{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
			[]change.Change{chg.SetHeight(1)},
		},
		{
			"test 3",
			3,
			[]change.Change{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
			[]change.Change{chg.SetHeight(1), chg.SetHeight(3)},
		},
		{
			"test 4",
			4,
			[]change.Change{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
			[]change.Change{chg.SetHeight(1), chg.SetHeight(3)},
		},
		{
			"test 5",
			5,
			[]change.Change{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
			[]change.Change{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
		},
		{
			"test 6",
			6,
			[]change.Change{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
			[]change.Change{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
		},
	}

	for _, tt := range testcases {

		setup()

		err := repo.AppendChanges(tt.changes)
		r.NoError(err)

		changes, err := repo.LoadChanges(testNodeName1)
		r.NoError(err)
		r.Equalf(tt.expected, changes[:len(tt.expected)], tt.name)

		cleanup()
	}

	// Multi-batch cases: changes written across several AppendChanges calls
	// must still load back as one ordered history.
	testcases2 := []struct {
		name     string
		height   int32
		changes  [][]change.Change
		expected []change.Change
	}{
		{
			"Save in 2 batches, and load up to 1",
			1,
			[][]change.Change{
				{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
				{chg.SetHeight(6), chg.SetHeight(8), chg.SetHeight(9)},
			},
			[]change.Change{chg.SetHeight(1)},
		},
		{
			"Save in 2 batches, and load up to 9",
			9,
			[][]change.Change{
				{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
				{chg.SetHeight(6), chg.SetHeight(8), chg.SetHeight(9)},
			},
			[]change.Change{
				chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5),
				chg.SetHeight(6), chg.SetHeight(8), chg.SetHeight(9),
			},
		},
		{
			"Save in 3 batches, and load up to 8",
			8,
			[][]change.Change{
				{chg.SetHeight(1), chg.SetHeight(3)},
				{chg.SetHeight(5)},
				{chg.SetHeight(6), chg.SetHeight(8), chg.SetHeight(9)},
			},
			[]change.Change{
				chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5),
				chg.SetHeight(6), chg.SetHeight(8),
			},
		},
	}

	for _, tt := range testcases2 {

		setup()

		for _, changes := range tt.changes {
			err := repo.AppendChanges(changes)
			r.NoError(err)
		}

		changes, err := repo.LoadChanges(testNodeName1)
		r.NoError(err)
		r.Equalf(tt.expected, changes[:len(tt.expected)], tt.name)

		cleanup()
	}
}
|
||||
|
||||
// TestIterator verifies IterateChildren visits every stored name under an
// empty prefix, yielding their changes in the store's key order — the
// creation list below is written in the order iteration is expected to
// return it.
func TestIterator(t *testing.T) {

	r := require.New(t)

	repo, err := NewPebble(t.TempDir())
	r.NoError(err)
	defer func() {
		err := repo.Close()
		r.NoError(err)
	}()

	// Names with embedded NUL and high bytes to exercise byte-wise ordering.
	creation := []change.Change{
		{Name: []byte("test\x00"), Height: 5},
		{Name: []byte("test\x00\x00"), Height: 5},
		{Name: []byte("test\x00b"), Height: 5},
		{Name: []byte("test\x00\xFF"), Height: 5},
		{Name: []byte("testa"), Height: 5},
	}
	err = repo.AppendChanges(creation)
	r.NoError(err)

	var received []change.Change
	repo.IterateChildren([]byte{}, func(changes []change.Change) bool {
		received = append(received, changes...)
		return true
	})
	r.Equal(creation, received)
}
|
157
claimtrie/node/noderepo/pebble.go
Normal file
157
claimtrie/node/noderepo/pebble.go
Normal file
|
@ -0,0 +1,157 @@
|
|||
package noderepo
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/btcsuite/btcd/claimtrie/change"
|
||||
"github.com/cockroachdb/pebble"
|
||||
"github.com/vmihailenco/msgpack/v5"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Pebble is a node Repo backed by a pebble key/value store.
// The key is the claim name; the value is that name's change history as a
// concatenation of msgpack-encoded change.Change records (see AppendChanges).
type Pebble struct {
	db *pebble.DB
}
|
||||
|
||||
func NewPebble(path string) (*Pebble, error) {
|
||||
|
||||
db, err := pebble.Open(path, &pebble.Options{Cache: pebble.NewCache(128 << 20), BytesPerSync: 16 << 20})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pebble open %s, %w", path, err)
|
||||
}
|
||||
|
||||
repo := &Pebble{db: db}
|
||||
|
||||
return repo, nil
|
||||
}
|
||||
|
||||
// AppendChanges makes an assumption that anything you pass to it is newer than what was saved before.
|
||||
func (repo *Pebble) AppendChanges(changes []change.Change) error {
|
||||
|
||||
batch := repo.db.NewBatch()
|
||||
|
||||
// TODO: switch to buffer pool and reuse encoder
|
||||
for _, chg := range changes {
|
||||
value, err := msgpack.Marshal(chg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("msgpack marshal value: %w", err)
|
||||
}
|
||||
|
||||
err = batch.Merge(chg.Name, value, pebble.NoSync)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble set: %w", err)
|
||||
}
|
||||
}
|
||||
err := batch.Commit(pebble.NoSync)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble save commit: %w", err)
|
||||
}
|
||||
batch.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
func (repo *Pebble) LoadChanges(name []byte) ([]change.Change, error) {
|
||||
|
||||
data, closer, err := repo.db.Get(name)
|
||||
if err != nil && err != pebble.ErrNotFound {
|
||||
return nil, fmt.Errorf("pebble get: %w", err)
|
||||
}
|
||||
if closer != nil {
|
||||
defer closer.Close()
|
||||
}
|
||||
|
||||
return unmarshalChanges(data)
|
||||
}
|
||||
|
||||
func unmarshalChanges(data []byte) ([]change.Change, error) {
|
||||
var changes []change.Change
|
||||
dec := msgpack.GetDecoder()
|
||||
defer msgpack.PutDecoder(dec)
|
||||
|
||||
reader := bytes.NewReader(data)
|
||||
dec.Reset(reader)
|
||||
for reader.Len() > 0 {
|
||||
var chg change.Change
|
||||
err := dec.Decode(&chg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("msgpack unmarshal: %w", err)
|
||||
}
|
||||
changes = append(changes, chg)
|
||||
}
|
||||
|
||||
// this was required for the normalization stuff:
|
||||
sort.SliceStable(changes, func(i, j int) bool {
|
||||
return changes[i].Height < changes[j].Height
|
||||
})
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
func (repo *Pebble) DropChanges(name []byte, finalHeight int32) error {
|
||||
changes, err := repo.LoadChanges(name)
|
||||
i := 0
|
||||
for ; i < len(changes); i++ {
|
||||
if changes[i].Height > finalHeight {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble drop: %w", err)
|
||||
}
|
||||
// making a performance assumption that DropChanges won't happen often:
|
||||
err = repo.db.Set(name, []byte{}, pebble.NoSync)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble drop: %w", err)
|
||||
}
|
||||
return repo.AppendChanges(changes[:i])
|
||||
}
|
||||
|
||||
func (repo *Pebble) IterateChildren(name []byte, f func(changes []change.Change) bool) {
|
||||
end := bytes.NewBuffer(nil)
|
||||
end.Write(name)
|
||||
end.Write(bytes.Repeat([]byte{255, 255, 255, 255}, 64))
|
||||
|
||||
prefixIterOptions := &pebble.IterOptions{
|
||||
LowerBound: name,
|
||||
UpperBound: end.Bytes(),
|
||||
}
|
||||
|
||||
iter := repo.db.NewIter(prefixIterOptions)
|
||||
defer iter.Close()
|
||||
|
||||
for iter.First(); iter.Valid(); iter.Next() {
|
||||
changes, err := unmarshalChanges(iter.Value())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if !f(changes) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (repo *Pebble) IterateAll(predicate func(name []byte) bool) {
|
||||
iter := repo.db.NewIter(nil)
|
||||
defer iter.Close()
|
||||
|
||||
for iter.First(); iter.Valid(); iter.Next() {
|
||||
if !predicate(iter.Key()) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (repo *Pebble) Close() error {
|
||||
|
||||
err := repo.db.Flush()
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble flush: %w", err)
|
||||
}
|
||||
|
||||
err = repo.db.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble close: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
30
claimtrie/node/normalizer.go
Normal file
30
claimtrie/node/normalizer.go
Normal file
|
@ -0,0 +1,30 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/claimtrie/param"
|
||||
"golang.org/x/text/cases"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
//func init() {
|
||||
// if cases.UnicodeVersion[:2] != "11" {
|
||||
// panic("Wrong unicode version!")
|
||||
// }
|
||||
//}
|
||||
|
||||
// Normalize canonicalizes a claim name for comparison. It defaults to the
// pure-Go implementation; building with the use_icu_normalization tag swaps
// in the ICU-backed version (see normalizer_icu.go's init).
var Normalize = normalizeGo

// NormalizeIfNecessary returns name normalized once height has reached the
// normalization fork height, and returns it unchanged below the fork.
func NormalizeIfNecessary(name []byte, height int32) []byte {
	if height < param.NormalizedNameForkHeight {
		return name
	}
	return Normalize(name)
}
|
||||
|
||||
var folder = cases.Fold()
|
||||
|
||||
func normalizeGo(value []byte) []byte {
|
||||
|
||||
normalized := norm.NFD.Bytes(value)
|
||||
return folder.Bytes(normalized)
|
||||
}
|
65
claimtrie/node/normalizer_icu.go
Normal file
65
claimtrie/node/normalizer_icu.go
Normal file
|
@ -0,0 +1,65 @@
|
|||
// +build use_icu_normalization
|
||||
|
||||
package node
|
||||
|
||||
// #cgo CFLAGS: -O2
|
||||
// #cgo LDFLAGS: -licuio -licui18n -licuuc -licudata
|
||||
// #include <unicode/unorm2.h>
|
||||
// #include <unicode/ustring.h>
|
||||
// #include <unicode/uversion.h>
|
||||
// int icu_version() {
|
||||
// UVersionInfo info;
|
||||
// u_getVersion(info);
|
||||
// return ((int)(info[0]) << 16) + info[1];
|
||||
// }
|
||||
// int normalize(char* name, int length, char* result) {
|
||||
// UErrorCode ec = U_ZERO_ERROR;
|
||||
// static const UNormalizer2* normalizer = NULL;
|
||||
// if (normalizer == NULL) normalizer = unorm2_getNFDInstance(&ec);
|
||||
// UChar dest[256]; // maximum claim name size is 255; we won't have more UTF16 chars than bytes
|
||||
// int dest_len;
|
||||
// u_strFromUTF8(dest, 256, &dest_len, name, length, &ec);
|
||||
// if (U_FAILURE(ec) || dest_len == 0) return 0;
|
||||
// UChar normalized[256];
|
||||
// dest_len = unorm2_normalize(normalizer, dest, dest_len, normalized, 256, &ec);
|
||||
// if (U_FAILURE(ec) || dest_len == 0) return 0;
|
||||
// dest_len = u_strFoldCase(dest, 256, normalized, dest_len, U_FOLD_CASE_DEFAULT, &ec);
|
||||
// if (U_FAILURE(ec) || dest_len == 0) return 0;
|
||||
// u_strToUTF8(result, 512, &dest_len, dest, dest_len, &ec);
|
||||
// return dest_len;
|
||||
// }
|
||||
import "C"
|
||||
import (
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// init replaces the default pure-Go Normalize with the ICU-backed
// implementation when built with the use_icu_normalization tag.
func init() {
	Normalize = normalizeICU
}
|
||||
|
||||
// IcuVersion reports the linked ICU library's version as "major.minor",
// unpacked from the packed int returned by the C helper icu_version().
func IcuVersion() string {
	// TODO: we probably need to explode if it's not 63.2 as it affects consensus
	result := C.icu_version()
	return fmt.Sprintf("%d.%d", result>>16, result&0xffff)
}
|
||||
|
||||
// normalizeICU canonicalizes a name via the C helper `normalize` (ICU NFD
// decomposition plus case folding). On any ICU failure — including invalid
// UTF-8 input — the original bytes are returned unchanged.
func normalizeICU(value []byte) []byte {
	if len(value) <= 0 {
		return value
	}
	name := (*C.char)(unsafe.Pointer(&value[0]))
	length := C.int(len(value))

	// hopefully this is a stack alloc (but it may be a bit large for that):
	var resultName [512]byte // inputs are restricted to 255 chars; it shouldn't expand too much past that
	result := unsafe.Pointer(&resultName[0])

	// resultLength == 0 signals an ICU error; fall back to the input as-is
	resultLength := C.normalize(name, length, (*C.char)(result))
	if resultLength == 0 {
		return value
	}

	// return resultName[0:resultLength] -- we want to shrink the result (not use a slice on 1024)
	return C.GoBytes(result, resultLength)
}
|
23
claimtrie/node/normalizer_icu_test.go
Normal file
23
claimtrie/node/normalizer_icu_test.go
Normal file
|
@ -0,0 +1,23 @@
|
|||
// +build use_icu_normalization
|
||||
|
||||
package node
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestNormalizationICU runs the shared normalization cases against the
// ICU-backed implementation.
func TestNormalizationICU(t *testing.T) {
	testNormalization(t, normalizeICU)
}
|
||||
|
||||
// BenchmarkNormalizeICU benchmarks the ICU-backed normalizer.
func BenchmarkNormalizeICU(b *testing.B) {
	benchmarkNormalize(b, normalizeICU)
}
|
||||
|
||||
// TestBlock760150 pins a name (from mainnet block 760150) on which the Go and
// ICU normalizers must agree — a consensus regression check for names mixing
// Cherokee letters and typographic quotes.
func TestBlock760150(t *testing.T) {
	test := "Ꮖ-Ꮩ-Ꭺ-N--------Ꭺ-N-Ꮹ-Ꭼ-Ꮮ-Ꭺ-on-Instagram_-“Our-next-destination-is-East-and-Southeast-Asia--selfie--asia”"
	a := normalizeGo([]byte(test))
	b := normalizeICU([]byte(test))
	assert.Equal(t, a, b)
}
|
54
claimtrie/node/normalizer_test.go
Normal file
54
claimtrie/node/normalizer_test.go
Normal file
|
@ -0,0 +1,54 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestNormalizationGo runs the shared normalization cases against the
// pure-Go implementation.
func TestNormalizationGo(t *testing.T) {
	testNormalization(t, normalizeGo)
}
|
||||
|
||||
// testNormalization exercises one normalize implementation against a fixed
// table of case-folding, invalid-UTF-8, and NFD-decomposition inputs, so the
// Go and ICU implementations can be checked against the same expectations.
func testNormalization(t *testing.T, normalize func(value []byte) []byte) {

	r := require.New(t)

	// plain ASCII case folding
	r.Equal("test", string(normalize([]byte("TESt"))))
	r.Equal("test 23", string(normalize([]byte("tesT 23"))))
	// invalid UTF-8 must pass through unchanged
	r.Equal("\xFF", string(normalize([]byte("\xFF"))))
	r.Equal("\xC3\x28", string(normalize([]byte("\xC3\x28"))))
	// U+2126 OHM SIGN decomposes/folds to Greek small omega
	r.Equal("\xCF\x89", string(normalize([]byte("\xE2\x84\xA6"))))
	// Cyrillic and Armenian capitals fold to lowercase
	r.Equal("\xD1\x84", string(normalize([]byte("\xD0\xA4"))))
	r.Equal("\xD5\xA2", string(normalize([]byte("\xD4\xB2"))))
	// NFD splits precomposed kana into base + combining voicing mark
	r.Equal("\xE3\x81\xB5\xE3\x82\x99", string(normalize([]byte("\xE3\x81\xB6"))))
	// NFD decomposes a Hangul syllable into its jamo sequence
	r.Equal("\xE1\x84\x81\xE1\x85\xAA\xE1\x86\xB0", string(normalize([]byte("\xEA\xBD\x91"))))
}
|
||||
|
||||
func randSeq(n int) []byte {
|
||||
var alphabet = []rune("abcdefghijklmnopqrstuvwxyz̃ABCDEFGHIJKLMNOPQRSTUVWXYZ̃")
|
||||
|
||||
b := make([]rune, n)
|
||||
for i := range b {
|
||||
b[i] = alphabet[rand.Intn(len(alphabet))]
|
||||
}
|
||||
return []byte(string(b))
|
||||
}
|
||||
|
||||
// BenchmarkNormalize benchmarks the pure-Go normalizer.
func BenchmarkNormalize(b *testing.B) {
	benchmarkNormalize(b, normalizeGo)
}
|
||||
|
||||
func benchmarkNormalize(b *testing.B, normalize func(value []byte) []byte) {
|
||||
rand.Seed(42)
|
||||
strings := make([][]byte, b.N)
|
||||
for i := 0; i < b.N; i++ {
|
||||
strings[i] = randSeq(32)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
s := normalize(strings[i])
|
||||
require.True(b, len(s) >= 8)
|
||||
}
|
||||
}
|
123
claimtrie/node/normalizing_manager.go
Normal file
123
claimtrie/node/normalizing_manager.go
Normal file
|
@ -0,0 +1,123 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/btcsuite/btcd/claimtrie/change"
|
||||
"github.com/btcsuite/btcd/claimtrie/param"
|
||||
)
|
||||
|
||||
// NormalizingManager wraps a base Manager and applies claim-name
// normalization from the normalization fork height onward.
type NormalizingManager struct { // implements Manager
	Manager
	// normalizedAt is the height at which the fork's spend/re-add changes
	// were generated; -1 means the fork has not been applied yet.
	normalizedAt int32
}
|
||||
|
||||
func NewNormalizingManager(baseManager Manager) Manager {
|
||||
return &NormalizingManager{
|
||||
Manager: baseManager,
|
||||
normalizedAt: -1,
|
||||
}
|
||||
}
|
||||
|
||||
// AppendChange normalizes the change's name when its height is at or past the
// fork, then delegates to the wrapped Manager.
func (nm *NormalizingManager) AppendChange(chg change.Change) error {
	chg.Name = NormalizeIfNecessary(chg.Name, chg.Height)
	return nm.Manager.AppendChange(chg)
}
|
||||
|
||||
// IncrementHeightTo injects the one-time normalization-fork changes when
// height crosses the fork, then advances the wrapped Manager.
func (nm *NormalizingManager) IncrementHeightTo(height int32) ([][]byte, error) {
	nm.addNormalizationForkChangesIfNecessary(height)
	return nm.Manager.IncrementHeightTo(height)
}
|
||||
|
||||
func (nm *NormalizingManager) DecrementHeightTo(affectedNames [][]byte, height int32) error {
|
||||
if nm.normalizedAt > height {
|
||||
nm.normalizedAt = -1
|
||||
}
|
||||
return nm.Manager.DecrementHeightTo(affectedNames, height)
|
||||
}
|
||||
|
||||
// NextUpdateHeightOfNode delegates to the wrapped Manager and normalizes the
// returned name when the next update falls after the fork height.
// NOTE(review): this uses a strict `>` while NormalizeIfNecessary uses `>=`
// at the fork height — confirm the off-by-one is intentional.
func (nm *NormalizingManager) NextUpdateHeightOfNode(name []byte) ([]byte, int32) {
	name, nextUpdate := nm.Manager.NextUpdateHeightOfNode(name)
	if nextUpdate > param.NormalizedNameForkHeight {
		name = Normalize(name)
	}
	return name, nextUpdate
}
|
||||
|
||||
// addNormalizationForkChangesIfNecessary generates, exactly once at the
// normalization fork height, spend + re-add change pairs that move every
// claim and support whose raw name differs from its normalized form onto the
// normalized name.
func (nm *NormalizingManager) addNormalizationForkChangesIfNecessary(height int32) {

	if nm.Manager.Height()+1 != height {
		// initialization phase: resuming past the fork, so mark it done
		if height >= param.NormalizedNameForkHeight {
			nm.normalizedAt = param.NormalizedNameForkHeight // eh, we don't really know that it happened there
		}
	}

	// only run once, and only exactly at the fork height
	if nm.normalizedAt >= 0 || height != param.NormalizedNameForkHeight {
		return
	}
	nm.normalizedAt = height
	fmt.Printf("Generating necessary changes for the normalization fork...\n")

	// the original code had an unfortunate bug where many unnecessary takeovers
	// were triggered at the normalization fork
	predicate := func(name []byte) bool {
		norm := Normalize(name)
		eq := bytes.Equal(name, norm)
		if eq {
			// name is already in normalized form; nothing to move
			return true
		}

		clone := make([]byte, len(name))
		copy(clone, name) // iteration name buffer is reused on future loops

		// by loading changes for norm here, you can determine if there will be a conflict

		n, err := nm.Manager.Node(clone)
		if err != nil || n == nil {
			return true
		}
		// NOTE(review): AppendChange errors are discarded throughout this
		// loop — confirm that is intentional.
		for _, c := range n.Claims {
			nm.Manager.AppendChange(change.Change{
				Type:          change.AddClaim,
				Name:          norm,
				Height:        c.AcceptedAt,
				OutPoint:      c.OutPoint.String(),
				ClaimID:       c.ClaimID,
				Amount:        c.Amount,
				Value:         c.Value,
				ActiveHeight:  c.ActiveAt, // necessary to match the old hash
				VisibleHeight: height,     // necessary to match the old hash; it would have been much better without
			})
			nm.Manager.AppendChange(change.Change{
				Type:     change.SpendClaim,
				Name:     clone,
				Height:   height,
				OutPoint: c.OutPoint.String(),
			})
		}
		for _, c := range n.Supports {
			nm.Manager.AppendChange(change.Change{
				Type:          change.AddSupport,
				Name:          norm,
				Height:        c.AcceptedAt,
				OutPoint:      c.OutPoint.String(),
				ClaimID:       c.ClaimID,
				Amount:        c.Amount,
				Value:         c.Value,
				ActiveHeight:  c.ActiveAt,
				VisibleHeight: height,
			})
			nm.Manager.AppendChange(change.Change{
				Type:     change.SpendSupport,
				Name:     clone,
				Height:   height,
				OutPoint: c.OutPoint.String(),
			})
		}

		return true
	}
	nm.Manager.IterateNames(predicate)
}
|
29
claimtrie/node/repo.go
Normal file
29
claimtrie/node/repo.go
Normal file
|
@ -0,0 +1,29 @@
|
|||
package node
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/claimtrie/change"
|
||||
)
|
||||
|
||||
// Repo defines APIs for Node to access the persistence layer.
type Repo interface {
	// AppendChanges saves changes into the repo.
	// The changes can belong to different nodes, but the chronological
	// order must be preserved for the same node.
	AppendChanges(changes []change.Change) error

	// LoadChanges loads all changes recorded for a node, sorted by height.
	// If no changes are found, both the returned slice and error are nil.
	LoadChanges(name []byte) ([]change.Change, error)

	// DropChanges discards a node's changes recorded above finalHeight.
	DropChanges(name []byte, finalHeight int32) error

	// Close closes the repo.
	Close() error

	// IterateChildren invokes f with the change set of each key prefixed by
	// name. Return false from f to stop the iteration.
	IterateChildren(name []byte, f func(changes []change.Change) bool)

	// IterateAll iterates keys until the predicate function returns false
	IterateAll(predicate func(name []byte) bool)
}
|
319
claimtrie/param/delays.go
Normal file
319
claimtrie/param/delays.go
Normal file
|
@ -0,0 +1,319 @@
|
|||
package param
|
||||
|
||||
// DelayWorkarounds maps claim names to the block heights at which historical
// nodes applied non-standard activation delays. This is consensus data: do
// not edit entries, or hashes of historical blocks will no longer match.
var DelayWorkarounds = generateDelayWorkarounds() // called "removal workarounds" in previous versions

// generateDelayWorkarounds returns the hard-coded name → heights table used
// to reproduce legacy activation-delay behavior.
func generateDelayWorkarounds() map[string][]int32 {
	return map[string][]int32{
		"travtest01":                               {426898},
		"gauntlet-invade-the-darkness-lvl-1-of":    {583305},
		"fr-let-s-play-software-inc-jay":           {588308},
		"fr-motorsport-manager-jay-s-racing":       {588308},
		"fr-crusader-kings-2-la-dynastie-6":        {588318},
		"fr-jurassic-world-evolution-let-s-play":   {588318},
		"calling-tech-support-scammers-live-3":     {588683, 646584},
		"let-s-play-jackbox-games":                 {589013},
		"lets-play-jackbox-games-5":                {589013},
		"kabutothesnake-s-live-ps4-broadcast":      {589538},
		"no-eas-strong-thunderstorm-advisory":      {589554},
		"geometry-dash-level-requests":             {589564},
		"geometry-dash-level-requests-2":           {589564},
		"star-ocean-integrity-and-faithlessness":   {589609},
		"@pop":                                     {589613},
		"ullash":                                   {589630},
		"today-s-professionals-2018-winter-3":      {589640},
		"today-s-professionals-2018-winter-4":      {589640},
		"today-s-professionals-2018-winter-10":     {589641},
		"today-s-professionals-big-brother-6-13":   {589641},
		"today-s-professionals-big-brother-6-14":   {589641},
		"today-s-professionals-big-brother-6-26":   {589641},
		"today-s-professionals-big-brother-6-27":   {589641},
		"today-s-professionals-big-brother-6-28":   {589641},
		"today-s-professionals-big-brother-6-29":   {589641},
		"dark-souls-iii":                           {589697},
		"bobby-blades":                             {589760},
		"adrian":                                   {589803},
		"roblox-2":                                 {589803, 597925},
		"roblox-4":                                 {589803},
		"roblox-5":                                 {589803},
		"roblox-6":                                 {589803},
		"roblox-7":                                 {589803},
		"roblox-8":                                 {589803},
		"madden-17":                                {589809},
		"madden-18-franchise":                      {589810},
		"fifa-14-android-astrodude44-vs":           {589831},
		"gaming-with-silverwolf-live-stream-3":     {589849},
		"gaming-with-silverwolf-live-stream-4":     {589849},
		"gaming-with-silverwolf-live-stream-5":     {589849},
		"gaming-with-silverwolf-videos-live":       {589849},
		"gaming-with-silverwolf-live-stream-6":     {589851},
		"live-q-a":                                 {589851},
		"classic-sonic-games":                      {589870},
		"gta":                                      {589926},
		"j-dog7973-s-fortnite-squad":               {589926},
		"wow-warlords-of-draenor-horde-side":       {589967},
		"minecraft-ps4-hardcore-survival-2-the-5":  {589991},
		"happy-new-year-2017":                      {590013},
		"come-chill-with-rekzzey-2":                {590020},
		"counter-strike-global-offensive-funny":    {590031},
		"father-vs-son-stickfight-stickfight":      {590178},
		"little-t-playing-subnautica-livestream":   {590178},
		"today-s-professionals-big-brother-7-26-5": {590200},
		"50585be4e3159a7-1":                        {590206},
		"dark-souls-iii-soul-level-1-challenge":    {590223},
		"dark-souls-iii-soul-level-1-challenge-3":  {590223},
		"let-s-play-sniper-elite-4-authentic-2":    {590225},
		"skyrim-special-edition-ps4-platinum-4":    {590225},
		"let-s-play-final-fantasy-the-zodiac-2":    {590226},
		"let-s-play-final-fantasy-the-zodiac-3":    {590226},
		"ls-h-ppchen-halloween-stream-vom-31-10":   {590401},
		"a-new-stream":                             {590669},
		"danganronpa-v3-killing-harmony-episode":   {590708},
		"danganronpa-v3-killing-harmony-episode-4": {590708},
		"danganronpa-v3-killing-harmony-episode-6": {590708},
		"danganronpa-v3-killing-harmony-episode-8": {590708},
		"danganronpa-v3-killing-harmony-episode-9": {590708},
		"call-of-duty-infinite-warfare-gameplay-2": {591982},
		"destiny-the-taken-king-gameplay":          {591982},
		"horizon-zero-dawn-100-complete-4":         {591983},
		"ghost-recon-wildlands-100-complete-4":     {591984},
		"nier-automata-100-complete-gameplay-25":   {591985},
		"frustrert":                                {592291},
		"call-of-duty-black-ops-3-multiplayer":     {593504},
		"rayman-legends-challenges-app-the":        {593551},
		"super-mario-sunshine-3-player-race-2":     {593552},
		"some-new-stuff-might-play-a-game":         {593698},
		"memory-techniques-1-000-people-system":    {595537},
		"propresenter-6-tutorials-new-features-4":  {595559},
		"rocket-league-live":                       {595559},
		"fortnite-battle-royale":                   {595818},
		"fortnite-battle-royale-2":                 {595818},
		"ohare12345-s-live-ps4-broadcast":          {595818},
		"super-smash-bros-u-home-run-contest-13":   {595838},
		"super-smash-bros-u-home-run-contest-15":   {595838},
		"super-smash-bros-u-home-run-contest-2":    {595838, 595844},
		"super-smash-bros-u-home-run-contest-22":   {595838, 595845},
		"super-smash-bros-u-multi-man-smash-3":     {595838},
		"minecraft-survival-biedronka-i-czarny-2":  {596828},
		"gramy-minecraft-jasmc-pl":                 {596829},
		"farcry-5-gameplay":                        {595818},
		"my-channel-trailer":                       {595818},
		"full-song-production-tutorial-aeternum":   {596934},
		"blackboxglobalreview-hd":                  {597091},
		"tom-clancy-s-rainbow-six-siege":           {597633},
		"5-new-technology-innovations-in-5":        {597635},
		"5-new-technology-innovations-in-5-2":      {597635},
		"how-to-play-nothing-else-matters-on":      {597637},
		"rb6":                                      {597639},
		"borderlands-2-tiny-tina-s-assault-on":     {597658},
		"let-s-play-borderlands-the-pre-sequel":    {597658},
		"caveman-world-mountains-of-unga-boonga":   {597660},
		"for-honor-ps4-2":                          {597706},
		"fortnite-episode-1":                       {597728},
		"300-subscribers":                          {597750},
		"viscera-cleanup-detail-santa-s-rampage":   {597755},
		"infinite-voxel-terrain-in-unity-update":   {597777},
		"let-s-play-pok-mon-light-platinum":        {597783},
		"video-2":                                  {597785},
		"video-8":                                  {597785},
		"finally":                                  {597793},
		"let-s-play-mario-party-luigi-s-engine":    {597796},
		"my-edited-video":                          {597799},
		"we-need-to-talk":                          {597800},
		"tf2-stream-2":                             {597811},
		"royal-thumble-tuesday-night-thumbdown":    {597814},
		"beat-it-michael-jackson-cover":            {597815},
		"black-ops-3":                              {597816},
		"call-of-duty-black-ops-3-campaign":        {597819},
		"skyrim-special-edition-silent-2":          {597822},
		"the-chainsmokers-everybody-hates-me":      {597823},
		"experiment-glowing-1000-degree-knife-vs":  {597824},
		"l1011widebody-friends-let-s-play-2":       {597824},
		"call-of-duty-black-ops-4":                 {597825},
		"let-s-play-fallout-2-restoration-3":       {597825},
		"let-s-play-fallout-2-restoration-19":      {597826},
		"let-s-play-fallout-2-restoration-27":      {597826},
		"2015":                                     {597828},
		"payeer":                                   {597829},
		"youtube-3":                                {597829},
		"bitcoin-5":                                {597830},
		"2016":                                     {597831},
		"bitcoin-2":                                {597831},
		"dreamtowards":                             {597831},
		"surfearner":                               {597831},
		"100-000":                                  {597832},
		"20000":                                    {597833},
		"remme":                                    {597833},
		"hycon":                                    {597834},
		"robocraft":                                {597834},
		"saturday-night-baseball-with-37":          {597834},
		"let-s-play-command-conquer-red-alert-9":   {597835},
		"15-curiosidades-que-probablemente-ya":     {597837},
		"elder-scrolls-online-road-to-level-20":    {597893},
		"playerunknown-s-battlegrounds":            {597894},
		"black-ops-3-fun":                          {597897},
		"mortal-kombat-xl-the-funniest":            {597899},
		"try-not-to-laugh-2":                       {597899},
		"call-of-duty-advanced-warfare-domination": {597898},
		"my-live-stream-with-du-recorder-5":        {597900},
		"ls-h-ppchen-halloween-stream-vom-31-10-2": {597904},
		"ls-h-ppchen-halloween-stream-vom-31-10-3": {597904},
		"how-it-feels-to-chew-5-gum-funny-8":       {597905},
		"live-stream-mu-club-america-3":            {597918},
		"black-death":                              {597927},
		"lets-play-spore-with-3":                   {597929},
		"true-mov-2":                               {597933},
		"fortnite-w-pat-the-rat-pat-the-rat":       {597935},
		"jugando-pokemon-esmeralda-gba":            {597935},
		"talking-about-my-channel-and-much-more-4": {597936},
		"-14":                                      {597939},
		"-15":                                      {597939},
		"-16":                                      {597939},
		"-17":                                      {597939},
		"-18":                                      {597939},
		"-20":                                      {597939},
		"-21":                                      {597939},
		"-24":                                      {597939},
		"-25":                                      {597939},
		"-26":                                      {597939},
		"-27":                                      {597939},
		"-28":                                      {597939},
		"-29":                                      {597939},
		"-31":                                      {597941},
		"-34":                                      {597941},
		"-6":                                       {597939},
		"-7":                                       {597939},
		"10-4":                                     {612097},
		"10-6":                                     {612097},
		"10-7":                                     {612097},
		"10-diy":                                   {612097},
		"10-twitch":                                {612097},
		"100-5":                                    {597909},
		"189f2f04a378c02-1":                        {612097},
		"2011-2":                                   {597917},
		"2011-3":                                   {597917},
		"2c61c818687ed09-1":                        {612097},
		"5-diy-4":                                  {612097},
		"@andymcdandycdn":                          {640212},
		"@lividjava":                               {651654},
		"@mhx":                                     {653957},
		"@tipwhatyoulike":                          {599792},
		"@wibbels":                                 {612195},
		"@yisraeldov":                              {647416},
		"beyaz-hap-biseks-el-evlat":                {657957},
		"bilgisayar-al-t-rma-s-recinde-ya-ananlar": {657957},
		"brave-como-ganhar-dinheiro-todos-os-dias": {598494},
		"c81e728d9d4c2f6-1":                        {598178},
		"call-of-duty-world-war-2":                 {597935},
		"chain-reaction":                           {597940},
		"commodore-64-an-lar-ve-oyunlar":           {657957},
		"counter-strike-global-offensive-gameplay": {597900},
		"dead-island-riptide-co-op-walkthrough-2":  {597904, 598105},
		"diy-10":                                   {612097},
		"diy-11":                                   {612097},
		"diy-13":                                   {612097},
		"diy-14":                                   {612097},
		"diy-19":                                   {612097},
		"diy-4":                                    {612097},
		"diy-6":                                    {612097},
		"diy-7":                                    {612097},
		"diy-9":                                    {612097},
		"doktor-ve-patron-sahnesinin-haz-rl-k-ve":  {657957},
		"eat-the-street":                           {597910},
		"fallout-4-modded":                         {597901},
		"fallout-4-walkthrough":                    {597900},
		"filmli-efecast-129-film-inde-film-inde":   {657957},
		"filmli-efecast-130-ger-ek-hayatta-anime":  {657957},
		"filmli-efecast-97-netflix-filmi-form-l":   {657957},
		"for-honor-2":                              {597932},
		"for-honor-4":                              {597932},
		"gta-5":                                    {597902},
		"gta-5-2":                                  {597902},
		"helldriver-g-n-n-ekstrem-filmi":           {657957},
		"hi-4":                                     {597933},
		"hi-5":                                     {597933},
		"hi-7":                                     {597933},
		"kizoa-movie-video-slideshow-maker":        {597900, 597932},
		"l1011widebody-friends-let-s-play-3":       {598070},
		"lbry":                                     {608276},
		"lets-play-spore-with":                     {597930},
		"madants":                                  {625032},
		"mechwarrior-2-soundtrack-clan-jade":       {598070},
		"milo-forbidden-conversation":              {655173},
		"mobile-record":                            {597910},
		"mouths":                                   {607379},
		"mp-aleyna-tilki-nin-zorla-seyrettirilen":  {657957},
		"mp-atat-rk-e-eytan-diyen-yunan-as-ll":     {657957},
		"mp-bah-eli-calan-avukatlar-yla-g-r-s-n":   {657957},
		"mp-bu-podcast-babalar-in":                 {657957},
		"mp-bu-podcasti-akp-li-tan-d-klar-n-za":    {657957},
		"mp-gaziantep-te-tacizle-su-lan-p-dayak":   {650409},
		"mp-hatipo-lu-nun-ermeni-bir-ocu-u-canl":   {657957},
		"mp-k-rt-annelerin-hdp-ye-tepkisi":         {657957},
		"mp-kenan-sofuo-lu-nun-mamo-lu-na-destek":  {657957},
		"mp-mamo-lu-nun-muhafazakar-g-r-nmesi":     {657957},
		"mp-mhp-akp-gerginli-i":                    {657957},
		"mp-otob-ste-t-rkle-meyin-diye-ba-ran-svi": {657957},
		"mp-pace-i-kazand-m-diyip-21-bin-dolar":    {657957},
		"mp-rusya-da-kad-nlara-tecav-zc-s-n-ld":    {657957},
		"mp-s-n-rs-z-nafakan-n-kalkmas-adil-mi":    {657957},
		"mp-susamam-ark-s-ve-serkan-nci-nin-ark":   {657957},
		"mp-y-lmaz-zdil-in-kitap-paralar-yla-yard": {657957},
		"mp-yang-n-u-aklar-pahal-diyen-orman":      {657957},
		"mp-yeni-zelanda-katliam-ndan-siyasi-rant": {657957},
		"my-edited-video-4":                        {597932},
		"my-live-stream-with-du-recorder":          {597900},
		"my-live-stream-with-du-recorder-3":        {597900},
		"new-channel-intro":                        {598235},
		"paladins-3":                               {597900},
		"popstar-sahnesi-kamera-arkas-g-r-nt-leri": {657957},
		"retro-bilgisayar-bulu-mas":                {657957},
		"scp-t-rk-e-scp-002-canl-oda":              {657957},
		"steep":                                    {597900},
		"stephen-hicks-postmodernism-reprise":      {655173},
		"super-smash-bros-u-brawl-co-op-event":     {595841},
		"super-smash-bros-u-super-mario-u-smash":   {595839},
		"super-smash-bros-u-zelda-smash-series":    {595841},
		"superonline-fiber-den-efsane-kaz-k-yedim": {657957},
		"talking-about-my-channel-and-much-more-5": {597936},
		"test1337reflector356":                     {627814},
		"the-last-of-us-remastered-2":              {597915},
		"tom-clancy-s-ghost-recon-wildlands-2":     {597916},
		"tom-clancy-s-rainbow-six-siege-3":         {597935},
		"wwe-2k18-with-that-guy-and-tricky":        {597901},
		"yay-nc-bob-afet-kamera-arkas":             {657957},
	}
}
|
||||
|
||||
// DelayWorkaroundsPart2 is a later batch of the same consensus workaround
// data as DelayWorkarounds. Do not edit entries.
var DelayWorkaroundsPart2 = generateDelayWorkaroundsPart2()

// generateDelayWorkaroundsPart2 returns the second hard-coded name → heights
// activation-delay workaround table.
func generateDelayWorkaroundsPart2() map[string][]int32 {
	return map[string][]int32{
		"en-vivo-hablando-de-bitcoin-y-3": {664642},
		"en-vivo-hablando-de-bitcoin-y-4": {664642},
		"@gn":                             {752630, 755269},
		"putalocura":                      {809590},
		"@isc":                            {813832},
		"@pnl":                            {864618},
		"@dreamr":                         {875433},
		"2019-10-30":                      {878258},
		"papi-16":                         {884431},
		"papi-4":                          {884431},
		"papi-18":                         {884431},
		"papi-17":                         {884431},
		"papi-7":                          {884431},
		"papi-3":                          {884431},
		"papi-30":                         {884431},
		"papi":                            {884431},
		"papi-9":                          {884431},
		"papi-19":                         {884431},
		"papi-papi-2":                     {884431},
		"papi-6":                          {884431},
		"viaje-a-la-luna-":                {887018, 887591, 888024},
		"fortnite1":                       {900015},
		"who-is-the-master-":              {900787},
		"thp":                             {923634},
		"thm":                             {923635},
		"el-presidente":                   {923766},
		"@erikh526":                       {933294},
	}
}
|
57
claimtrie/param/general.go
Normal file
57
claimtrie/param/general.go
Normal file
|
@ -0,0 +1,57 @@
|
|||
package param
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/btcsuite/btcutil"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Chain-dependent claimtrie parameters. All of these are assigned by
// SetNetwork before use; they are zero-valued until then.
var (
	MaxActiveDelay    int32
	ActiveDelayFactor int32

	MaxNodeManagerCacheSize int

	// claim expiration windows and the height of the expiration-extension fork
	OriginalClaimExpirationTime       int32
	ExtendedClaimExpirationTime       int32
	ExtendedClaimExpirationForkHeight int32

	MaxRemovalWorkaroundHeight int32

	// heights of the name-normalization and all-claims-in-merkle forks
	NormalizedNameForkHeight    int32
	AllClaimsInMerkleForkHeight int32

	// ClaimtrieDataFolder is where the claim databases are stored on disk.
	ClaimtrieDataFolder string
)
|
||||
|
||||
// SetNetwork initializes all claimtrie consensus parameters for the given
// chain (mainnet, testnet3, or regtest/simnet) and sets the on-disk data
// folder under the application data directory.
func SetNetwork(net wire.BitcoinNet, subfolder string) {
	// parameters shared by all networks
	MaxActiveDelay = 4032
	ActiveDelayFactor = 32
	MaxNodeManagerCacheSize = 16000
	appDir := btcutil.AppDataDir("chain", false)
	ClaimtrieDataFolder = filepath.Join(appDir, "data", subfolder, "claim_dbs")

	switch net {
	case wire.MainNet:
		OriginalClaimExpirationTime = 262974
		ExtendedClaimExpirationTime = 2102400
		ExtendedClaimExpirationForkHeight = 400155 // https://lbry.io/news/hf1807
		MaxRemovalWorkaroundHeight = 658300
		NormalizedNameForkHeight = 539940    // targeting 21 March 2019, https://lbry.com/news/hf1903
		AllClaimsInMerkleForkHeight = 658309 // targeting 30 Oct 2019, https://lbry.com/news/hf1910
	case wire.TestNet3:
		OriginalClaimExpirationTime = 262974
		ExtendedClaimExpirationTime = 2102400
		ExtendedClaimExpirationForkHeight = 1
		MaxRemovalWorkaroundHeight = 100
		NormalizedNameForkHeight = 1
		AllClaimsInMerkleForkHeight = 109
	case wire.TestNet, wire.SimNet: // "regtest"
		OriginalClaimExpirationTime = 500
		ExtendedClaimExpirationTime = 600
		ExtendedClaimExpirationForkHeight = 800
		MaxRemovalWorkaroundHeight = -1
		NormalizedNameForkHeight = 250
		AllClaimsInMerkleForkHeight = 349
	}
}
|
451
claimtrie/param/takeovers.go
Normal file
451
claimtrie/param/takeovers.go
Normal file
|
@ -0,0 +1,451 @@
|
|||
package param
|
||||
|
||||
// TakeoverWorkarounds is a fixed table of historical takeover
// exceptions, built once at package init. Keys look like
// "<height>_<claimName>" — presumably the height at which the takeover
// occurred; confirm against the consensus code that consults this map.
var TakeoverWorkarounds = generateTakeoverWorkarounds()

// generateTakeoverWorkarounds returns the hard-coded takeover-workaround
// table. The map is pure data; the function exists only so the literal
// can sit below the variable that exposes it.
func generateTakeoverWorkarounds() map[string]int { // TODO: the values here are unused; bools would probably be better
	return map[string]int{
		"496856_HunterxHunterAMV":                      496835,
		"542978_namethattune1":                         542429,
		"543508_namethattune-5":                        543306,
		"546780_forecasts":                             546624,
		"548730_forecasts":                             546780,
		"551540_forecasts":                             548730,
		"552380_chicthinkingofyou":                     550804,
		"560363_takephotowithlbryteam":                 559962,
		"563710_test-img":                              563700,
		"566750_itila":                                 543261,
		"567082_malabarismo-com-bolas-de-futebol-vs-chap": 563592,
		"596860_180mphpullsthrougheurope":              596757,
		"617743_vaccines":                              572756,
		"619609_copface-slamshandcuffedteengirlintoconcrete": 539940,
		"620392_banker-exposes-satanic-elite":          597788,
		"624997_direttiva-sulle-armi-ue-in-svizzera-di": 567908,
		"624997_best-of-apex":                          585580,
		"629970_cannot-ignore-my-veins":                629914,
		"633058_bio-waste-we-programmed-your-brain":    617185,
		"633601_macrolauncher-overview-first-look":     633058,
		"640186_its-up-to-you-and-i-2019":              639116,
		"640241_tor-eas-3-20":                          592645,
		"640522_seadoxdark":                            619531,
		"640617_lbry-przewodnik-1-instalacja":          451186,
		"640623_avxchange-2019-the-next-netflix-spotify": 606790,
		"640684_algebra-introduction":                  624152,
		"640684_a-high-school-math-teacher-does-a":     600885,
		"640684_another-random-life-update":            600884,
		"640684_who-is-the-taylor-series-for":          600882,
		"640684_tedx-talk-released":                    612303,
		"640730_e-mental":                              615375,
		"641143_amiga-1200-bespoke-virgin-cinema":      623542,
		"641161_dreamscape-432-omega":                  618894,
		"641162_2019-topstone-carbon-force-etap-axs-bike": 639107,
		"641186_arin-sings-big-floppy-penis-live-jazz-2": 638904,
		"641421_edward-snowden-on-bitcoin-and-privacy": 522729,
		"641421_what-is-libra-facebook-s-new":          598236,
		"641421_what-are-stablecoins-counter-party-risk": 583508,
		"641421_anthony-pomp-pompliano-discusses-crypto": 564416,
		"641421_tim-draper-crypto-invest-summit-2019":  550329,
		"641421_mass-adoption-and-what-will-it-take-to": 549781,
		"641421_dragonwolftech-youtube-channel-trailer": 567128,
		"641421_naomi-brockwell-s-weekly-crypto-recap": 540006,
		"641421_blockchain-based-youtube-twitter":      580809,
		"641421_andreas-antonopoulos-on-privacy-privacy": 533522,
		"641817_mexico-submits-and-big-tech-worsens":   582977,
		"641817_why-we-need-travel-bans":               581354,
		"641880_censored-by-patreon-bitchute-shares":   482460,
		"641880_crypto-wonderland":                     485218,
		"642168_1-diabolo-julio-cezar-16-cbmcp-freestyle": 374999,
		"642314_tough-students":                        615780,
		"642697_gamercauldronep2":                      642153,
		"643406_the-most-fun-i-ve-had-in-a-long-time":  616506,
		"643893_spitshine69-and-uk-freedom-audits":     616876,
		"644480_my-mum-getting-attacked-a-duck":        567624,
		"644486_the-cryptocurrency-experiment":         569189,
		"644486_tag-you-re-it":                         558316,
		"644486_orange-county-mineral-society-rock-and": 397138,
		"644486_sampling-with-the-gold-rush-nugget":    527960,
		"644562_september-15-21-a-new-way-of-doing":    634792,
		"644562_july-week-3-collective-frequency-general": 607942,
		"644562_september-8-14-growing-up-general":     630977,
		"644562_august-4-10-collective-frequency-general": 612307,
		"644562_august-11-17-collective-frequency":     617279,
		"644562_september-1-7-gentle-wake-up-call":     627104,
		"644607_no-more-lol":                           643497,
		"644607_minion-masters-who-knew":               641313,
		"645236_danganronpa-3-the-end-of-hope-s-peak":  644153,
		"645348_captchabot-a-discord-bot-to-protect-your": 592810,
		"645701_the-xero-hour-saint-greta-of-thunberg": 644081,
		"645701_batman-v-superman-theological-notions": 590189,
		"645918_emacs-is-great-ep-0-init-el-from-org":  575666,
		"645918_emacs-is-great-ep-1-packages":          575666,
		"645918_emacs-is-great-ep-40-pt-2-hebrew":      575668,
		"645923_nasal-snuff-review-osp-batch-2":        575658,
		"645923_why-bit-coin":                          575658,
		"645929_begin-quest":                           598822,
		"645929_filthy-foe":                            588386,
		"645929_unsanitary-snow":                       588386,
		"645929_famispam-1-music-box":                  588386,
		"645929_running-away":                          598822,
		"645931_my-beloved-chris-madsen":               589114,
		"645931_space-is-consciousness-chris-madsen":   589116,
		"645947_gasifier-rocket-stove-secondary-burn":  590595,
		"645949_mouse-razer-abyssus-v2-e-mousepad":     591139,
		"645949_pr-temporada-2018-league-of-legends":   591138,
		"645949_windows-10-build-9901-pt-br":           591137,
		"645949_abrindo-pacotes-do-festival-lunar-2018": 591139,
		"645949_unboxing-camisetas-personalizadas-play-e": 591138,
		"645949_abrindo-envelopes-do-festival-lunar-2017": 591138,
		"645951_grub-my-grub-played-guruku-tersayang":  618033,
		"645951_ismeeltimepiece":                       618038,
		"645951_thoughts-on-doom":                      596485,
		"645951_thoughts-on-god-of-war-about-as-deep-as": 596485,
		"645956_linux-lite-3-6-see-what-s-new":         645195,
		"646191_kahlil-gibran-the-prophet-part-1":      597637,
		"646551_crypto-market-crash-should-you-sell-your": 442613,
		"646551_live-crypto-trading-and-market-analysis": 442615,
		"646551_5-reasons-trading-is-always-better-than": 500850,
		"646551_digitex-futures-dump-panic-selling-or": 568065,
		"646552_how-to-install-polarr-on-kali-linux-bynp": 466235,
		"646586_electoral-college-kids-civics-lesson":  430818,
		"646602_grapes-full-90-minute-watercolour":     537108,
		"646602_meizu-mx4-the-second-ubuntu-phone":     537109,
		"646609_how-to-set-up-the-ledger-nano-x":       569992,
		"646609_how-to-buy-ethereum":                   482354,
		"646609_how-to-install-setup-the-exodus-multi": 482356,
		"646609_how-to-manage-your-passwords-using":    531987,
		"646609_cryptodad-s-live-q-a-friday-may-3rd-2019": 562303,
		"646638_resident-evil-ada-chapter-5-final":     605612,
		"646639_taurus-june-2019-career-love-tarot":    586910,
		"646652_digital-bullpen-ep-5-building-a-digital": 589274,
		"646661_sunlight":                              591076,
		"646661_grasp-lab-nasa-open-mct-series":        589414,
		"646663_bunnula-s-creepers-tim-pool-s-beanie-a": 599669,
		"646663_bunnula-music-hey-ya-by-outkast":       605685,
		"646663_bunnula-tv-s-music-television-eunoia":  644437,
		"646663_the-pussy-centipede-40-sneakers-and":   587265,
		"646663_bunnula-reacts-ashton-titty-whitty":    596988,
		"646677_filip-reviews-jeromes-dream-cataracts-so": 589751,
		"646691_fascism-and-its-mobilizing-passions":   464342,
		"646692_hsb-color-layers-action-for-adobe":     586533,
		"646692_master-colorist-action-pack-extracting": 631830,
		"646693_how-to-protect-your-garden-from-animals": 588476,
		"646693_gardening-for-the-apocalypse-epic":     588472,
		"646693_my-first-bee-hive-foundationless-natural": 588469,
		"646693_dragon-fruit-and-passion-fruit-planting": 588470,
		"646693_installing-my-first-foundationless":    588469,
		"646705_first-naza-fpv":                        590411,
		"646717_first-burning-man-2019-detour-034":     630247,
		"646717_why-bob-marley-was-an-idiot-test-driving": 477558,
		"646717_we-are-addicted-to-gambling-ufc-207-w": 481398,
		"646717_ghetto-swap-meet-selling-storage-lockers": 498291,
		"646738_1-kings-chapter-7-summary-and-what-god": 586599,
		"646814_brand-spanking-new-junior-high-school": 592378,
		"646814_lupe-fiasco-freestyle-at-end-of-the-weak": 639535,
		"646824_how-to-one-stroke-painting-doodles-mixed": 592404,
		"646824_acrylic-pouring-landscape-with-a-tree": 592404,
		"646824_how-to-make-a-diy-concrete-paste-planter": 595976,
		"646824_how-to-make-a-rustic-sand-planter-sand": 592404,
		"646833_3-day-festival-at-the-galilee-lake-and": 592842,
		"646833_rainbow-circle-around-the-noon-sun-above": 592842,
		"646833_energetic-self-control-demonstration":  623811,
		"646833_bees-congregating":                     592842,
		"646856_formula-offroad-honefoss-sunday-track2": 592872,
		"646862_h3video1-dc-vs-mb-1":                   593237,
		"646862_h3video1-iwasgoingto-load-up-gmod-but": 593237,
		"646883_watch-this-game-developer-make-a-video": 592593,
		"646883_how-to-write-secure-javascript":        592593,
		"646883_blockchain-technology-explained-2-hour": 592593,
		"646888_fl-studio-bits":                        608155,
		"646914_andy-s-shed-live-s03e02-the-longest":   592200,
		"646914_gpo-telephone-776-phone-restoration":   592201,
		"646916_toxic-studios-co-stream-pubg":          597126,
		"646916_hyperlapse-of-prague-praha-from-inside": 597109,
		"646933_videobits-1":                           597378,
		"646933_clouds-developing-daytime-8":           597378,
		"646933_slechtvalk-in-watertoren-bodegraven":   597378,
		"646933_timelapse-maansverduistering-16-juli":  605880,
		"646933_startrails-27":                         597378,
		"646933_passing-clouds-daytime-3":              597378,
		"646940_nerdgasm-unboxing-massive-playing-cards": 597421,
		"646946_debunking-cops-volume-3-the-murder-of": 630570,
		"646961_kingsong-ks16x-electric-unicycle-250km": 636725,
		"646968_wild-mountain-goats-amazing-rock":      621940,
		"646968_no-shelter-backcountry-camping-in":     621940,
		"646968_can-i-live-in-this-through-winter-lets": 645750,
		"646968_why-i-wear-a-chest-rig-backcountry-or": 621940,
		"646989_marc-ivan-o-gorman-promo-producer-editor": 645656,
		"647045_@moraltis":                             646367,
		"647045_moraltis-twitch-highlights-first-edit": 646368,
		"647075_the-3-massive-tinder-convo-mistakes":   629464,
		"647075_how-to-get-friend-zoned-via-text":      592298,
		"647075_don-t-do-this-on-tinder":               624591,
		"647322_world-of-tanks-7-kills":                609905,
		"647322_the-tier-6-auto-loading-swedish-meatball": 591338,
		"647416_hypnotic-soundscapes-garden-of-the":    596923,
		"647416_hypnotic-soundscapes-the-cauldron-sacred": 596928,
		"647416_schumann-resonance-to-theta-sweep":     596920,
		"647416_conversational-indirect-hypnosis-why":  596913,
		"647493_mimirs-brunnr":                         590498,
		"648143_live-ita-completiamo-the-evil-within-2": 646568,
		"648203_why-we-love-people-that-hurt-us":       591128,
		"648203_i-didn-t-like-my-baby-and-considered":  591128,
		"648220_trade-talk-001-i-m-a-vlogger-now-fielder": 597303,
		"648220_vise-restoration-record-no-6-vise":     597303,
		"648540_amv-reign":                             571863,
		"648540_amv-virus":                             571863,
		"648588_audial-drift-(a-journey-into-sound)":   630217,
		"648616_quick-zbrush-tip-transpose-master-scale": 463205,
		"648616_how-to-create-3d-horns-maya-to-zbrush-2": 463205,
		"648815_arduino-based-cartridge-game-handheld": 593252,
		"648815_a-maze-update-3-new-game-modes-amazing": 593252,
		"649209_denmark-trip":                          591428,
		"649209_stunning-4k-drone-footage":             591428,
		"649215_how-to-create-a-channel-and-publish-a": 414908,
		"649215_lbryclass-11-how-to-get-your-deposit":  632420,
		"649543_spring-break-madness-at-universal":     599698,
		"649921_navegador-brave-navegador-da-web-seguro": 649261,
		"650191_stream-intro":                          591301,
		"650946_platelet-chan-fan-art":                 584601,
		"650946_aqua-fanart":                           584601,
		"650946_virginmedia-stores-password-in-plain":  619537,
		"650946_running-linux-on-android-teaser":       604441,
		"650946_hatsune-miku-ievan-polka":              600126,
		"650946_digital-security-and-privacy-2-and-a-new": 600135,
		"650993_my-editorial-comment-on-recent-youtube": 590305,
		"650993_drive-7-18-2018":                       590305,
		"651011_old-world-put-on-realm-realms-gg":      591899,
		"651011_make-your-own-soundboard-with-autohotkey": 591899,
		"651011_ark-survival-https-discord-gg-ad26xa":  637680,
		"651011_minecraft-featuring-seus-8-just-came-4": 596488,
		"651057_found-footage-bikinis-at-the-beach-with": 593586,
		"651057_found-footage-sexy-mom-a-mink-stole":   593586,
		"651067_who-are-the-gentiles-gomer":            597094,
		"651067_take-back-the-kingdom-ep-2-450-million": 597094,
		"651067_mmxtac-implemented-footstep-sounds-and": 597094,
		"651067_dynasoul-s-blender-to-unreal-animated": 597094,
		"651103_calling-a-scammer-syntax-error":        612532,
		"651103_quick-highlight-of-my-day":             647651,
		"651103_calling-scammers-and-singing-christmas": 612531,
		"651109_@livingtzm":                            637322,
		"651109_living-tzm-juuso-from-finland-september": 643412,
		"651373_se-voc-rir-ou-sorrir-reinicie-o-v-deo": 649302,
		"651476_what-is-pagan-online-polished-new-arpg": 592157,
		"651476_must-have-elder-scrolls-online-addons": 592156,
		"651476_who-should-play-albion-online":         592156,
		"651730_person-detection-with-keras-tensorflow": 621276,
		"651730_youtube-censorship-take-two":           587249,
		"651730_new-red-tail-shark-and-two-silver-sharks": 587251,
		"651730_around-auckland":                       587250,
		"651730_humanism-in-islam":                     587250,
		"651730_tigers-at-auckland-zoo":                587250,
		"651730_gravity-demonstration":                 587250,
		"651730_copyright-question":                    587249,
		"651730_uberg33k-the-ultimate-software-developer": 599522,
		"651730_chl-e-swarbrick-auckland-mayoral":      587250,
		"651730_code-reviews":                          587249,
		"651730_raising-robots":                        587251,
		"651730_teaching-python":                       587250,
		"651730_kelly-tarlton-2016":                    587250,
		"652172_where-is-everything":                   589491,
		"652172_some-guy-and-his-camera":               617062,
		"652172_practical-information-pt-1":            589491,
		"652172_latent-vibrations":                     589491,
		"652172_maldek-compilation":                    589491,
		"652444_thank-you-etika-thank-you-desmond":     652121,
		"652611_plants-vs-zombies-gw2-20190827183609":  624339,
		"652611_wolfenstein-the-new-order-playthrough-6": 650299,
		"652887_a-codeigniter-cms-open-source-download": 652737,
		"652966_@pokesadventures":                      632391,
		"653009_flat-earth-uk-convention-is-a-bust":    585786,
		"653009_flat-earth-reset-flat-earth-money-tree": 585786,
		"653011_veil-of-thorns-dispirit-brutal-leech-3": 652475,
		"653069_being-born-after-9-11":                 632218,
		"653069_8-years-on-youtube-what-it-has-done-for": 637130,
		"653069_answering-questions-how-original":      521447,
		"653069_talking-about-my-first-comedy-stand-up": 583450,
		"653069_doing-push-ups-in-public":              650920,
		"653069_vlog-extra":                            465997,
		"653069_crying-myself":                         465997,
		"653069_xbox-rejection":                        465992,
		"653354_msps-how-to-find-a-linux-job-where-no": 642537,
		"653354_windows-is-better-than-linux-vlog-it-and": 646306,
		"653354_luke-smith-is-wrong-about-everything":  507717,
		"653354_advice-for-those-starting-out-in-tech": 612452,
		"653354_treating-yourself-to-make-studying-more": 623561,
		"653354_lpi-linux-essential-dns-tools-vlog-what": 559464,
		"653354_is-learning-linux-worth-it-in-2019-vlog": 570886,
		"653354_huawei-linux-and-cellphones-in-2019-vlog": 578501,
		"653354_how-to-use-webmin-to-manage-linux":     511507,
		"653354_latency-concurrency-and-the-best-value": 596857,
		"653354_how-to-use-the-pomodoro-method-in-it":  506632,
		"653354_negotiating-compensation-vlog-it-and":  542317,
		"653354_procedural-goals-vs-outcome-goals-vlog": 626785,
		"653354_intro-to-raid-understanding-how-raid":  529341,
		"653354_smokeping":                             574693,
		"653354_richard-stallman-should-not-be-fired":  634928,
		"653354_unusual-or-specialty-certifications-vlog": 620146,
		"653354_gratitude-and-small-projects-vlog-it":  564900,
		"653354_why-linux-on-the-smartphone-is-important": 649543,
		"653354_opportunity-costs-vlog-it-devops-career": 549708,
		"653354_double-giveaway-lpi-class-dates-and":   608129,
		"653354_linux-on-the-smartphone-in-2019-librem": 530426,
		"653524_celtic-folk-music-full-live-concert-mps": 589762,
		"653745_aftermath-of-the-mac":                  592768,
		"653745_b-c-a-glock-17-threaded-barrel":        592770,
		"653800_middle-earth-shadow-of-mordor-by":      590229,
		"654079_tomand-jeremy-chirs45":                 614296,
		"654096_achamos-carteira-com-grana-olha-o-que": 466262,
		"654096_viagem-bizarra-e-cansativa-ao-nordeste": 466263,
		"654096_tedio-na-tailandia-limpeza-de-area":    466265,
		"654425_schau-bung-2014-in-windischgarsten":    654410,
		"654425_mitternachtseinlage-ball-rk":           654410,
		"654425_zugabe-ball-rk-windischgarsten":        654412,
		"654722_skytrain-in-korea":                     463145,
		"654722_luwak-coffee-the-shit-coffee":          463155,
		"654722_puppet-show-in-bangkok-thailand":       462812,
		"654722_kyaito-market-myanmar":                 462813,
		"654724_wipeout-zombies-bo3-custom-zombies-1st": 589569,
		"654724_the-street-bo3-custom-zombies":         589544,
		"654880_wwii-airsoft-pow":                      586968,
		"654880_dueling-geese-fight-to-the-death":      586968,
		"654880_wwii-airsoft-torgau-raw-footage-part4": 586968,
		"655173_april-2019-q-and-a":                    554032,
		"655173_the-meaning-and-reality-of-individual": 607892,
		"655173_steven-pinker-progress-despite":        616984,
		"655173_we-make-stories-out-of-totem-poles":    549090,
		"655173_jamil-jivani-author-of-why-young-men":  542035,
		"655173_commentaries-on-jb-peterson-rebel-wisdom": 528898,
		"655173_auckland-clip-4-on-cain-and-abel":      629242,
		"655173_peterson-vs-zizek-livestream-tickets":  545285,
		"655173_auckland-clip-3-the-dawning-of-the-moral": 621154,
		"655173_religious-belief-and-the-enlightenment": 606269,
		"655173_auckland-lc-highlight-1-the-presumption": 565783,
		"655173_q-a-sir-roger-scruton-dr-jordan-b":     544184,
		"655173_cancellation-polish-national-foundation": 562529,
		"655173_the-coddling-of-the-american-mind-haidt": 440185,
		"655173_02-harris-weinstein-peterson-discussion": 430896,
		"655173_jordan-peterson-threatens-everything-of": 519737,
		"655173_on-claiming-belief-in-god-commentary":  581738,
		"655173_how-to-make-the-world-better-really-with": 482317,
		"655173_quillette-discussion-with-founder-editor": 413749,
		"655173_jb-peterson-on-free-thought-and-speech": 462849,
		"655173_marxism-zizek-peterson-official-video": 578453,
		"655173_patreon-problem-solution-dave-rubin-dr": 490394,
		"655173_next-week-st-louis-salt-lake-city":     445933,
		"655173_conversations-with-john-anderson-jordan": 529981,
		"655173_nz-australia-12-rules-tour-next-2-weeks": 518649,
		"655173_a-call-to-rebellion-for-ontario-legal": 285451,
		"655173_2016-personality-lecture-12":           578465,
		"655173_on-the-vital-necessity-of-free-speech": 427404,
		"655173_2017-01-23-social-justice-freedom-of":  578465,
		"655173_discussion-sam-harris-the-idw-and-the": 423332,
		"655173_march-2018-patreon-q-a":                413749,
		"655173_take-aim-even-badly":                   490395,
		"655173_jp-f-wwbgo6a2w":                        539940,
		"655173_patreon-account-deletion":              503477,
		"655173_canada-us-europe-tour-august-dec-2018": 413749,
		"655173_leaders-myth-reality-general-stanley":  514333,
		"655173_jp-ifi5kkxig3s":                        539940,
		"655173_documentary-a-glitch-in-the-matrix-david": 413749,
		"655173_2017-08-14-patreon-q-and-a":            285451,
		"655173_postmodernism-history-and-diagnosis":   285451,
		"655173_23-minutes-from-maps-of-meaning-the":   413749,
		"655173_milo-forbidden-conversation":           578493,
		"655173_jp-wnjbasba-qw":                        539940,
		"655173_uk-12-rules-tour-october-and-november": 462849,
		"655173_2015-maps-of-meaning-10-culture-anomaly": 578465,
		"655173_ayaan-hirsi-ali-islam-mecca-vs-medina": 285452,
		"655173_jp-f9393el2z1i":                        539940,
		"655173_campus-indoctrination-the-parasitization": 285453,
		"655173_jp-owgc63khcl8":                        539940,
		"655173_the-death-and-resurrection-of-christ-a": 413749,
		"655173_01-harris-weinstein-peterson-discussion": 430896,
		"655173_enlightenment-now-steven-pinker-jb":    413749,
		"655173_the-lindsay-shepherd-affair-update":    413749,
		"655173_jp-g3fwumq5k8i":                        539940,
		"655173_jp-evvs3l-abv4":                        539940,
		"655173_former-australian-deputy-pm-john":      413750,
		"655173_message-to-my-korean-readers-90-seconds": 477424,
		"655173_jp--0xbomwjkgm":                        539940,
		"655173_ben-shapiro-jordan-peterson-and-a-12":  413749,
		"655173_jp-91jwsb7zyhw":                        539940,
		"655173_deconstruction-the-lindsay-shepherd":   299272,
		"655173_september-patreon-q-a":                 285451,
		"655173_jp-2c3m0tt5kce":                        539940,
		"655173_australia-s-john-anderson-dr-jordan-b": 413749,
		"655173_jp-hdrlq7dpiws":                        539940,
		"655173_stephen-hicks-postmodernism-reprise":   578480,
		"655173_october-patreon-q-a":                   285451,
		"655173_an-animated-intro-to-truth-order-and":  413749,
		"655173_jp-bsh37-x5rny":                        539940,
		"655173_january-2019-q-a":                      503477,
		"655173_comedians-canaries-and-coalmines":      498586,
		"655173_the-democrats-apology-and-promise":     465433,
		"655173_jp-s4c-jodptn8":                        539940,
		"655173_2014-personality-lecture-16-extraversion": 578465,
		"655173_dr-jordan-b-peterson-on-femsplainers":  490395,
		"655173_higher-ed-our-cultural-inflection-point": 527291,
		"655173_archetype-reality-friendship-and":      519736,
		"655173_sir-roger-scruton-dr-jordan-b-peterson": 490395,
		"655173_jp-cf2nqmqifxc":                        539940,
		"655173_penguin-uk-12-rules-for-life":          413749,
		"655173_march-2019-q-and-a":                    537138,
		"655173_jp-ne5vbomsqjc":                        539940,
		"655173_dublin-london-harris-murray-new-usa-12": 413749,
		"655173_12-rules-12-cities-tickets-now-available": 413749,
		"655173_jp-j9j-bvdrgdi":                        539940,
		"655173_responsibility-conscience-and-meaning": 499123,
		"655173_04-harris-murray-peterson-discussion":  436678,
		"655173_jp-ayhaz9k008q":                        539940,
		"655173_with-jocko-willink-the-catastrophe-of": 490395,
		"655173_interview-with-the-grievance-studies":  501296,
		"655173_russell-brand-jordan-b-peterson-under": 413750,
		"655173_goodbye-to-patreon":                    496771,
		"655173_revamped-podcast-announcement-with":    540943,
		"655173_swedes-want-to-know":                   285453,
		"655173_auckland-clip-2-the-four-fundamental":  607892,
		"655173_jp-dtirzqmgbdm":                        539940,
		"655173_political-correctness-a-force-for-good-a": 413750,
		"655173_sean-plunket-full-interview-new-zealand": 597638,
		"655173_q-a-the-meaning-and-reality-of":        616984,
		"655173_lecture-and-q-a-with-jordan-peterson-the": 413749,
		"655173_2017-personality-07-carl-jung-and-the": 578465,
		"655173_nina-paley-animator-extraordinaire":    413750,
		"655173_truth-as-the-antidote-to-suffering-with": 455127,
		"655173_bishop-barron-word-on-fire":            599814,
		"655173_zizek-vs-peterson-april-19":            527291,
		"655173_revamped-podcast-with-westwood-one":    540943,
		"655173_2016-11-19-university-of-toronto-free": 578465,
		"655173_jp-1emrmtrj5jc":                        539940,
		"655173_who-is-joe-rogan-with-jordan-peterson": 585578,
		"655173_who-dares-say-he-believes-in-god":      581738,
		"655252_games-with-live2d":                     589978,
		"655252_kaenbyou-rin-live2d":                   589978,
		"655374_steam-groups-are-crazy":                607590,
		"655379_asmr-captain-falcon-happily-beats-you-up": 644574,
		"655379_pixel-art-series-5-link-holding-the":   442952,
		"655379_who-can-cross-the-planck-length-the-hero": 610830,
		"655379_ssbb-the-yoshi-grab-release-crash":     609747,
		"655379_tas-captain-falcon-s-bizarre-adventure": 442958,
		"655379_super-smash-bros-in-360-test":          442963,
		"655379_what-if-luigi-was-b-u-f-f":             442971,
		"655803_sun-time-lapse-test-7":                 610634,
		"655952_upper-build-complete":                  591728,
		"656758_cryptocurrency-awareness-adoption-the": 541770,
		"656829_3d-printing-technologies-comparison":   462685,
		"656829_3d-printing-for-everyone":              462685,
		"657052_tni-punya-ilmu-kanuragan-gaya-baru":    657045,
		"657052_papa-sunimah-nelpon-sri-utami-emon":    657045,
		"657274_rapforlife-4-win":                      656856,
		"657274_bizzilion-proof-of-withdrawal":         656856,
		"657420_quick-drawing-prince-tribute-colored":  605630,
		"657453_white-boy-tom-mcdonald-facts":          597169,
		"657453_is-it-ok-to-look-when-you-with-your-girl": 610508,
		"657584_need-for-speed-ryzen-5-1600-gtx-1050-ti": 657161,
		"657584_quantum-break-ryzen-5-1600-gtx-1050-ti-4": 657161,
		"657584_nightcore-legends-never-die":           657161,
		"657706_mtb-enduro-ferragosto-2019-sestri":     638904,
		"657706_warface-free-for-all":                  638908,
		"657782_nick-warren-at-loveland-but-not-really": 444299,
		"658098_le-temps-nous-glisse-entre-les-doigts": 600099,
	}
}
|
8
claimtrie/temporal/repo.go
Normal file
8
claimtrie/temporal/repo.go
Normal file
|
@ -0,0 +1,8 @@
|
|||
package temporal
|
||||
|
||||
// Repo defines APIs for Temporal to access the persistence layer.
type Repo interface {
	// SetNodesAt records that each names[i] was touched at heights[i].
	// The two slices are parallel and indexed together.
	SetNodesAt(names [][]byte, heights []int32) error
	// NodesAt returns the names recorded at the given height, in no
	// guaranteed order.
	NodesAt(height int32) ([][]byte, error)
	// Close releases any resources held by the repo.
	Close() error
}
|
41
claimtrie/temporal/temporalrepo/memory.go
Normal file
41
claimtrie/temporal/temporalrepo/memory.go
Normal file
|
@ -0,0 +1,41 @@
|
|||
package temporalrepo
|
||||
|
||||
// Memory is an in-memory temporal repo: for every height it keeps the
// set of node names recorded at that height. It implements temporal.Repo.
type Memory struct {
	cache map[int32]map[string]bool
}

// NewMemory returns an empty in-memory temporal repo.
func NewMemory() *Memory {
	return &Memory{cache: map[int32]map[string]bool{}}
}

// SetNodesAt marks each names[i] as recorded at heights[i].
// Duplicate (name, height) pairs collapse into a single entry.
func (m *Memory) SetNodesAt(names [][]byte, heights []int32) error {
	for i, height := range heights {
		set, found := m.cache[height]
		if !found {
			set = map[string]bool{}
			m.cache[height] = set
		}
		set[string(names[i])] = true
	}
	return nil
}

// NodesAt returns the names recorded at the given height. Order is
// unspecified (map iteration); a height with no entries yields nil.
func (m *Memory) NodesAt(height int32) ([][]byte, error) {
	var names [][]byte
	for name := range m.cache[height] {
		names = append(names, []byte(name))
	}
	return names, nil
}

// Close is a no-op for the in-memory implementation.
func (m *Memory) Close() error {
	return nil
}
|
93
claimtrie/temporal/temporalrepo/pebble.go
Normal file
93
claimtrie/temporal/temporalrepo/pebble.go
Normal file
|
@ -0,0 +1,93 @@
|
|||
package temporalrepo
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/cockroachdb/pebble"
|
||||
)
|
||||
|
||||
type Pebble struct {
|
||||
db *pebble.DB
|
||||
}
|
||||
|
||||
func NewPebble(path string) (*Pebble, error) {
|
||||
|
||||
db, err := pebble.Open(path, &pebble.Options{Cache: pebble.NewCache(128 << 20)})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pebble open %s, %w", path, err)
|
||||
}
|
||||
|
||||
repo := &Pebble{db: db}
|
||||
|
||||
return repo, nil
|
||||
}
|
||||
|
||||
func (repo *Pebble) SetNodesAt(name [][]byte, heights []int32) error {
|
||||
|
||||
// key format: height(4B) + 0(1B) + name(varable length)
|
||||
key := bytes.NewBuffer(nil)
|
||||
batch := repo.db.NewBatch()
|
||||
defer batch.Close()
|
||||
for i, name := range name {
|
||||
key.Reset()
|
||||
binary.Write(key, binary.BigEndian, heights[i])
|
||||
binary.Write(key, binary.BigEndian, byte(0))
|
||||
key.Write(name)
|
||||
|
||||
err := batch.Set(key.Bytes(), nil, pebble.NoSync)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble set: %w", err)
|
||||
}
|
||||
}
|
||||
return batch.Commit(pebble.NoSync)
|
||||
}
|
||||
|
||||
func (repo *Pebble) NodesAt(height int32) ([][]byte, error) {
|
||||
|
||||
prefix := bytes.NewBuffer(nil)
|
||||
binary.Write(prefix, binary.BigEndian, height)
|
||||
binary.Write(prefix, binary.BigEndian, byte(0))
|
||||
|
||||
end := bytes.NewBuffer(nil)
|
||||
binary.Write(end, binary.BigEndian, height)
|
||||
binary.Write(end, binary.BigEndian, byte(1))
|
||||
|
||||
prefixIterOptions := &pebble.IterOptions{
|
||||
LowerBound: prefix.Bytes(),
|
||||
UpperBound: end.Bytes(),
|
||||
}
|
||||
|
||||
var names [][]byte
|
||||
|
||||
iter := repo.db.NewIter(prefixIterOptions)
|
||||
for iter.First(); iter.Valid(); iter.Next() {
|
||||
// Skipping the first 5 bytes (height and a null byte), we get the name.
|
||||
name := make([]byte, len(iter.Key())-5)
|
||||
copy(name, iter.Key()[5:]) // iter.Key() reuses its buffer
|
||||
names = append(names, name)
|
||||
}
|
||||
|
||||
err := iter.Close()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pebble get: %w", err)
|
||||
}
|
||||
|
||||
return names, nil
|
||||
}
|
||||
|
||||
func (repo *Pebble) Close() error {
|
||||
|
||||
err := repo.db.Flush()
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble fludh: %w", err)
|
||||
}
|
||||
|
||||
err = repo.db.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("pebble close: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
80
claimtrie/temporal/temporalrepo/temporalrepo_test.go
Normal file
80
claimtrie/temporal/temporalrepo/temporalrepo_test.go
Normal file
|
@ -0,0 +1,80 @@
|
|||
package temporalrepo
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/claimtrie/temporal"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestMemory(t *testing.T) {
|
||||
|
||||
repo := NewMemory()
|
||||
testTemporalRepo(t, repo)
|
||||
}
|
||||
|
||||
func TestPebble(t *testing.T) {
|
||||
|
||||
repo, err := NewPebble(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
|
||||
testTemporalRepo(t, repo)
|
||||
}
|
||||
|
||||
func testTemporalRepo(t *testing.T, repo temporal.Repo) {
|
||||
|
||||
r := require.New(t)
|
||||
|
||||
nameA := []byte("a")
|
||||
nameB := []byte("b")
|
||||
nameC := []byte("c")
|
||||
|
||||
testcases := []struct {
|
||||
name []byte
|
||||
heights []int32
|
||||
}{
|
||||
{nameA, []int32{1, 3, 2}},
|
||||
{nameA, []int32{2, 3}},
|
||||
{nameB, []int32{5, 4}},
|
||||
{nameB, []int32{5, 1}},
|
||||
{nameC, []int32{4, 3, 8}},
|
||||
}
|
||||
|
||||
for _, i := range testcases {
|
||||
names := make([][]byte, 0, len(i.heights))
|
||||
for range i.heights {
|
||||
names = append(names, i.name)
|
||||
}
|
||||
err := repo.SetNodesAt(names, i.heights)
|
||||
r.NoError(err)
|
||||
}
|
||||
|
||||
// a: 1, 2, 3
|
||||
// b: 1, 5, 4
|
||||
// c: 4, 3, 8
|
||||
|
||||
names, err := repo.NodesAt(2)
|
||||
r.NoError(err)
|
||||
r.ElementsMatch([][]byte{nameA}, names)
|
||||
|
||||
names, err = repo.NodesAt(5)
|
||||
r.NoError(err)
|
||||
r.ElementsMatch([][]byte{nameB}, names)
|
||||
|
||||
names, err = repo.NodesAt(8)
|
||||
r.NoError(err)
|
||||
r.ElementsMatch([][]byte{nameC}, names)
|
||||
|
||||
names, err = repo.NodesAt(1)
|
||||
r.NoError(err)
|
||||
r.ElementsMatch([][]byte{nameA, nameB}, names)
|
||||
|
||||
names, err = repo.NodesAt(4)
|
||||
r.NoError(err)
|
||||
r.ElementsMatch([][]byte{nameB, nameC}, names)
|
||||
|
||||
names, err = repo.NodesAt(3)
|
||||
r.NoError(err)
|
||||
r.ElementsMatch([][]byte{nameA, nameC}, names)
|
||||
}
|
Loading…
Add table
Reference in a new issue