[lbry] claimtrie: import current snapshot

Sync to tip

Co-authored-by: Brannon King <countprimes@gmail.com>
Roy Lee 2021-07-06 18:39:56 -07:00
parent 45627c7a6a
commit 6828cf5e36
54 changed files with 30107 additions and 0 deletions

claimtrie/block/blockrepo/pebble.go

@@ -0,0 +1,87 @@
package blockrepo
import (
"encoding/binary"
"github.com/pkg/errors"
"github.com/lbryio/lbcd/chaincfg/chainhash"
"github.com/cockroachdb/pebble"
)
type Pebble struct {
db *pebble.DB
}
func NewPebble(path string) (*Pebble, error) {
db, err := pebble.Open(path, &pebble.Options{MaxOpenFiles: 2000})
repo := &Pebble{db: db}
return repo, errors.Wrapf(err, "unable to open %s", path)
}
func (repo *Pebble) Load() (int32, error) {
iter := repo.db.NewIter(nil)
if !iter.Last() {
err := iter.Close()
return 0, errors.Wrap(err, "closing iterator with no last")
}
height := int32(binary.BigEndian.Uint32(iter.Key()))
err := iter.Close()
return height, errors.Wrap(err, "closing iterator")
}
func (repo *Pebble) Get(height int32) (*chainhash.Hash, error) {
key := make([]byte, 4)
binary.BigEndian.PutUint32(key, uint32(height))
b, closer, err := repo.db.Get(key)
if closer != nil {
defer closer.Close()
}
if err != nil {
return nil, errors.Wrap(err, "in get")
}
hash, err := chainhash.NewHash(b)
return hash, errors.Wrap(err, "creating hash")
}
func (repo *Pebble) Set(height int32, hash *chainhash.Hash) error {
key := make([]byte, 4)
binary.BigEndian.PutUint32(key, uint32(height))
return errors.WithStack(repo.db.Set(key, hash[:], pebble.NoSync))
}
func (repo *Pebble) Delete(heightMin, heightMax int32) error {
lower := make([]byte, 4)
binary.BigEndian.PutUint32(lower, uint32(heightMin))
upper := make([]byte, 4)
binary.BigEndian.PutUint32(upper, uint32(heightMax)+1)
return errors.Wrap(repo.db.DeleteRange(lower, upper, pebble.NoSync), "on range delete")
}
func (repo *Pebble) Close() error {
err := repo.db.Flush()
if err != nil {
// if we fail to close are we going to try again later?
return errors.Wrap(err, "on flush")
}
err = repo.db.Close()
return errors.Wrap(err, "on close")
}
func (repo *Pebble) Flush() error {
_, err := repo.db.AsyncFlush()
return err
}

claimtrie/block/repo.go

@@ -0,0 +1,15 @@
package block
import (
"github.com/lbryio/lbcd/chaincfg/chainhash"
)
// Repo defines APIs for Block to access persistence layer.
type Repo interface {
Load() (int32, error)
Set(height int32, hash *chainhash.Hash) error
Get(height int32) (*chainhash.Hash, error)
Close() error
Flush() error
Delete(heightMin, heightMax int32) error
}
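For orientation (a sketch, not part of the commit): a minimal round trip through this Repo interface via the blockrepo.Pebble implementation above. The path and hash value are illustrative.

package main

import (
	"fmt"

	"github.com/lbryio/lbcd/chaincfg/chainhash"
	"github.com/lbryio/lbcd/claimtrie/block/blockrepo"
)

func main() {
	// Open a throwaway repo; the path is illustrative.
	repo, err := blockrepo.NewPebble("/tmp/example_blocks_pebble_db")
	if err != nil {
		panic(err)
	}
	defer repo.Close()

	// Store the hash for height 100, then read it back.
	hash := chainhash.Hash{1}
	if err := repo.Set(100, &hash); err != nil {
		panic(err)
	}
	got, _ := repo.Get(100)
	tip, _ := repo.Load() // highest stored height, since keys sort big-endian
	fmt.Println(tip, got.IsEqual(&hash)) // 100 true
}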

claimtrie/chain/chainrepo/pebble.go

@@ -0,0 +1,77 @@
package chainrepo
import (
"encoding/binary"
"github.com/pkg/errors"
"github.com/lbryio/lbcd/claimtrie/change"
"github.com/vmihailenco/msgpack/v5"
"github.com/cockroachdb/pebble"
)
type Pebble struct {
db *pebble.DB
}
func NewPebble(path string) (*Pebble, error) {
db, err := pebble.Open(path, &pebble.Options{BytesPerSync: 64 << 20, MaxOpenFiles: 2000})
repo := &Pebble{db: db}
return repo, errors.Wrapf(err, "open %s", path)
}
func (repo *Pebble) Save(height int32, changes []change.Change) error {
if len(changes) == 0 {
return nil
}
var key [4]byte
binary.BigEndian.PutUint32(key[:], uint32(height))
value, err := msgpack.Marshal(changes)
if err != nil {
return errors.Wrap(err, "in marshaller")
}
err = repo.db.Set(key[:], value, pebble.NoSync)
return errors.Wrap(err, "in set")
}
func (repo *Pebble) Load(height int32) ([]change.Change, error) {
var key [4]byte
binary.BigEndian.PutUint32(key[:], uint32(height))
b, closer, err := repo.db.Get(key[:])
if closer != nil {
defer closer.Close()
}
if err != nil {
return nil, errors.Wrap(err, "in get")
}
var changes []change.Change
err = msgpack.Unmarshal(b, &changes)
return changes, errors.Wrap(err, "in unmarshaller")
}
func (repo *Pebble) Close() error {
err := repo.db.Flush()
if err != nil {
// if we fail to close are we going to try again later?
return errors.Wrap(err, "on flush")
}
err = repo.db.Close()
return errors.Wrap(err, "on close")
}
func (repo *Pebble) Flush() error {
_, err := repo.db.AsyncFlush()
return err
}

claimtrie/chain/repo.go

@@ -0,0 +1,10 @@
package chain
import "github.com/lbryio/lbcd/claimtrie/change"
type Repo interface {
Save(height int32, changes []change.Change) error
Load(height int32) ([]change.Change, error)
Close() error
Flush() error
}
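A companion sketch (illustrative, not part of the commit) persisting one block's changes through chainrepo.Pebble and reading them back:

package main

import (
	"fmt"

	"github.com/lbryio/lbcd/claimtrie/chain/chainrepo"
	"github.com/lbryio/lbcd/claimtrie/change"
)

func main() {
	repo, err := chainrepo.NewPebble("/tmp/example_chain_db") // illustrative path
	if err != nil {
		panic(err)
	}
	defer repo.Close()

	// One msgpack-encoded batch of changes is stored per block height.
	changes := []change.Change{change.NewChange(change.AddClaim).SetHeight(42)}
	if err := repo.Save(42, changes); err != nil {
		panic(err)
	}
	loaded, _ := repo.Load(42)
	fmt.Println(len(loaded), loaded[0].Type == change.AddClaim) // 1 true
}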

claimtrie/change/change.go

@@ -0,0 +1,112 @@
package change
import (
"bytes"
"encoding/binary"
"github.com/lbryio/lbcd/chaincfg/chainhash"
"github.com/lbryio/lbcd/wire"
)
type ChangeType uint32
const (
AddClaim ChangeType = iota
SpendClaim
UpdateClaim
AddSupport
SpendSupport
)
type Change struct {
Type ChangeType
Height int32
Name []byte
ClaimID ClaimID
OutPoint wire.OutPoint
Amount int64
ActiveHeight int32
VisibleHeight int32 // aka, CreatedAt; used for normalization fork
SpentChildren map[string]bool
}
func NewChange(typ ChangeType) Change {
return Change{Type: typ}
}
func (c Change) SetHeight(height int32) Change {
c.Height = height
return c
}
func (c Change) SetName(name []byte) Change {
c.Name = name // need to clone it?
return c
}
func (c Change) SetOutPoint(op *wire.OutPoint) Change {
c.OutPoint = *op
return c
}
func (c Change) SetAmount(amt int64) Change {
c.Amount = amt
return c
}
func (c *Change) Marshal(enc *bytes.Buffer) error {
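// Fixed layout, all fields big-endian:
// ClaimID (20) | outpoint hash (32) | outpoint index (4) | type (4) | height (4) |
// active height (4) | visible height (4) | amount (8) | child count (4) |
// then, per spent child: key length (2) | key bytes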
enc.Write(c.ClaimID[:])
enc.Write(c.OutPoint.Hash[:])
var temp [8]byte
binary.BigEndian.PutUint32(temp[:4], c.OutPoint.Index)
enc.Write(temp[:4])
binary.BigEndian.PutUint32(temp[:4], uint32(c.Type))
enc.Write(temp[:4])
binary.BigEndian.PutUint32(temp[:4], uint32(c.Height))
enc.Write(temp[:4])
binary.BigEndian.PutUint32(temp[:4], uint32(c.ActiveHeight))
enc.Write(temp[:4])
binary.BigEndian.PutUint32(temp[:4], uint32(c.VisibleHeight))
enc.Write(temp[:4])
binary.BigEndian.PutUint64(temp[:], uint64(c.Amount))
enc.Write(temp[:])
if c.SpentChildren != nil {
binary.BigEndian.PutUint32(temp[:4], uint32(len(c.SpentChildren)))
enc.Write(temp[:4])
for key := range c.SpentChildren {
binary.BigEndian.PutUint16(temp[:2], uint16(len(key))) // technically limited to 255; not sure we trust it
enc.Write(temp[:2])
enc.WriteString(key)
}
} else {
binary.BigEndian.PutUint32(temp[:4], 0)
enc.Write(temp[:4])
}
return nil
}
func (c *Change) Unmarshal(dec *bytes.Buffer) error {
copy(c.ClaimID[:], dec.Next(ClaimIDSize))
copy(c.OutPoint.Hash[:], dec.Next(chainhash.HashSize))
c.OutPoint.Index = binary.BigEndian.Uint32(dec.Next(4))
c.Type = ChangeType(binary.BigEndian.Uint32(dec.Next(4)))
c.Height = int32(binary.BigEndian.Uint32(dec.Next(4)))
c.ActiveHeight = int32(binary.BigEndian.Uint32(dec.Next(4)))
c.VisibleHeight = int32(binary.BigEndian.Uint32(dec.Next(4)))
c.Amount = int64(binary.BigEndian.Uint64(dec.Next(8)))
keys := binary.BigEndian.Uint32(dec.Next(4))
if keys > 0 {
c.SpentChildren = map[string]bool{}
}
for keys > 0 {
keys--
keySize := int(binary.BigEndian.Uint16(dec.Next(2)))
key := string(dec.Next(keySize))
c.SpentChildren[key] = true
}
return nil
}
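Since Marshal and Unmarshal mirror each other, a round trip preserves the fixed fields. Note that Name is not serialized here; the enclosing stores appear to key change lists by name. A sketch, not part of the commit:

package main

import (
	"bytes"
	"fmt"

	"github.com/lbryio/lbcd/claimtrie/change"
)

func main() {
	chg := change.NewChange(change.AddClaim).SetHeight(200).SetAmount(5000)

	var buf bytes.Buffer
	if err := chg.Marshal(&buf); err != nil {
		panic(err)
	}

	var out change.Change
	if err := out.Unmarshal(&buf); err != nil {
		panic(err)
	}
	// Name is not part of the encoding, so only the fixed fields survive the trip.
	fmt.Println(out.Type == change.AddClaim, out.Height, out.Amount) // true 200 5000
}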

claimtrie/change/claimid.go

@@ -0,0 +1,54 @@
package change
import (
"encoding/binary"
"encoding/hex"
"github.com/lbryio/lbcd/chaincfg/chainhash"
"github.com/lbryio/lbcd/wire"
btcutil "github.com/lbryio/lbcutil"
)
// ClaimID represents a Claim's ClaimID.
const ClaimIDSize = 20
type ClaimID [ClaimIDSize]byte
// NewClaimID returns a Claim ID calculated as Ripemd160(Sha256(OUTPOINT)).
func NewClaimID(op wire.OutPoint) (id ClaimID) {
var buffer [chainhash.HashSize + 4]byte // hoping for stack alloc
copy(buffer[:], op.Hash[:])
binary.BigEndian.PutUint32(buffer[chainhash.HashSize:], op.Index)
copy(id[:], btcutil.Hash160(buffer[:]))
return id
}
// NewIDFromString returns a Claim ID from a string.
func NewIDFromString(s string) (id ClaimID, err error) {
if len(s) == 40 {
_, err = hex.Decode(id[:], []byte(s))
} else {
copy(id[:], s)
}
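// the stored byte order is the reverse of the hex display order; flip the
// bytes for the in-memory form (String() below flips them back)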
for i, j := 0, len(id)-1; i < j; i, j = i+1, j-1 {
id[i], id[j] = id[j], id[i]
}
return id, err
}
// Key is for in-memory maps
func (id ClaimID) Key() string {
return string(id[:])
}
// String is for anything written to a DB
func (id ClaimID) String() string {
for i, j := 0, len(id)-1; i < j; i, j = i+1, j-1 {
id[i], id[j] = id[j], id[i]
}
return hex.EncodeToString(id[:])
}
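The full life cycle implied above, as a sketch (illustrative outpoint; not part of the commit): the ID is Hash160 of the txid concatenated with the big-endian output index, and String/NewIDFromString invert each other through the byte reversal.

package main

import (
	"fmt"

	"github.com/lbryio/lbcd/chaincfg/chainhash"
	"github.com/lbryio/lbcd/claimtrie/change"
	"github.com/lbryio/lbcd/wire"
)

func main() {
	op := *wire.NewOutPoint(&chainhash.Hash{}, 1) // illustrative outpoint
	id := change.NewClaimID(op)                   // Hash160(txid || big-endian vout)

	s := id.String() // 40 hex chars, byte-reversed for display
	back, err := change.NewIDFromString(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(s), back == id) // 40 true
}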

claimtrie/claimtrie.go

@@ -0,0 +1,487 @@
package claimtrie
import (
"bytes"
"fmt"
"path/filepath"
"runtime"
"sort"
"sync"
"github.com/pkg/errors"
"github.com/lbryio/lbcd/claimtrie/block"
"github.com/lbryio/lbcd/claimtrie/block/blockrepo"
"github.com/lbryio/lbcd/claimtrie/change"
"github.com/lbryio/lbcd/claimtrie/config"
"github.com/lbryio/lbcd/claimtrie/merkletrie"
"github.com/lbryio/lbcd/claimtrie/merkletrie/merkletrierepo"
"github.com/lbryio/lbcd/claimtrie/node"
"github.com/lbryio/lbcd/claimtrie/node/noderepo"
"github.com/lbryio/lbcd/claimtrie/normalization"
"github.com/lbryio/lbcd/claimtrie/param"
"github.com/lbryio/lbcd/claimtrie/temporal"
"github.com/lbryio/lbcd/claimtrie/temporal/temporalrepo"
"github.com/lbryio/lbcd/chaincfg/chainhash"
"github.com/lbryio/lbcd/wire"
)
// ClaimTrie implements a Merkle Trie supporting a linear history of commits.
type ClaimTrie struct {
// Repository for calculated block hashes.
blockRepo block.Repo
// Repository for storing temporal information of nodes at each block height.
// For example, which nodes (by name) should be refreshed at each block height
// due to stake expiration or delayed activation.
temporalRepo temporal.Repo
// Cache layer of Nodes.
nodeManager node.Manager
// Prefix tree (trie) that manages merkle hash of each node.
merkleTrie merkletrie.MerkleTrie
// Current block height, which is increased by one when AppendBlock() is called.
height int32
// Registered cleanup functions, invoked by Close() in reverse order.
cleanups []func() error
}
func New(cfg config.Config) (*ClaimTrie, error) {
var cleanups []func() error
// The passed-in cfg.DataDir already includes the network name.
dataDir := filepath.Join(cfg.DataDir, "claim_dbs")
dbPath := filepath.Join(dataDir, cfg.BlockRepoPebble.Path)
blockRepo, err := blockrepo.NewPebble(dbPath)
if err != nil {
return nil, errors.Wrap(err, "creating block repo")
}
cleanups = append(cleanups, blockRepo.Close)
err = blockRepo.Set(0, merkletrie.EmptyTrieHash)
if err != nil {
return nil, errors.Wrap(err, "setting block repo genesis")
}
dbPath = filepath.Join(dataDir, cfg.TemporalRepoPebble.Path)
temporalRepo, err := temporalrepo.NewPebble(dbPath)
if err != nil {
return nil, errors.Wrap(err, "creating temporal repo")
}
cleanups = append(cleanups, temporalRepo.Close)
// Initialize repository for changes to nodes.
// The cleanup is delegated to the Node Manager.
dbPath = filepath.Join(dataDir, cfg.NodeRepoPebble.Path)
nodeRepo, err := noderepo.NewPebble(dbPath)
if err != nil {
return nil, errors.Wrap(err, "creating node repo")
}
baseManager, err := node.NewBaseManager(nodeRepo)
if err != nil {
return nil, errors.Wrap(err, "creating node base manager")
}
normalizingManager := node.NewNormalizingManager(baseManager)
nodeManager := &node.HashV2Manager{Manager: normalizingManager}
cleanups = append(cleanups, nodeManager.Close)
var trie merkletrie.MerkleTrie
if cfg.RamTrie {
trie = merkletrie.NewRamTrie()
} else {
// Initialize repository for MerkleTrie. The cleanup is delegated to MerkleTrie.
dbPath = filepath.Join(dataDir, cfg.MerkleTrieRepoPebble.Path)
trieRepo, err := merkletrierepo.NewPebble(dbPath)
if err != nil {
return nil, errors.Wrap(err, "creating trie repo")
}
persistentTrie := merkletrie.NewPersistentTrie(trieRepo)
cleanups = append(cleanups, persistentTrie.Close)
trie = persistentTrie
}
// Restore the last height.
previousHeight, err := blockRepo.Load()
if err != nil {
return nil, errors.Wrap(err, "load block tip")
}
ct := &ClaimTrie{
blockRepo: blockRepo,
temporalRepo: temporalRepo,
nodeManager: nodeManager,
merkleTrie: trie,
height: previousHeight,
}
ct.cleanups = cleanups
if previousHeight > 0 {
hash, err := blockRepo.Get(previousHeight)
if err != nil {
ct.Close() // TODO: the cleanups aren't run when we exit with an err above here (but should be)
return nil, errors.Wrap(err, "block repo get")
}
_, err = nodeManager.IncrementHeightTo(previousHeight, false)
if err != nil {
ct.Close()
return nil, errors.Wrap(err, "increment height to")
}
err = trie.SetRoot(hash) // keep this after IncrementHeightTo
if err == merkletrie.ErrFullRebuildRequired {
ct.runFullTrieRebuild(nil, cfg.Interrupt)
}
if interruptRequested(cfg.Interrupt) || !ct.MerkleHash().IsEqual(hash) {
ct.Close()
return nil, errors.Errorf("unable to restore the claim hash to %s at height %d", hash.String(), previousHeight)
}
}
return ct, nil
}
// AddClaim adds a Claim to the ClaimTrie.
func (ct *ClaimTrie) AddClaim(name []byte, op wire.OutPoint, id change.ClaimID, amt int64) error {
chg := change.Change{
Type: change.AddClaim,
Name: name,
OutPoint: op,
Amount: amt,
ClaimID: id,
}
return ct.forwardNodeChange(chg)
}
// UpdateClaim updates a Claim in the ClaimTrie.
func (ct *ClaimTrie) UpdateClaim(name []byte, op wire.OutPoint, amt int64, id change.ClaimID) error {
chg := change.Change{
Type: change.UpdateClaim,
Name: name,
OutPoint: op,
Amount: amt,
ClaimID: id,
}
return ct.forwardNodeChange(chg)
}
// SpendClaim spends a Claim in the ClaimTrie.
func (ct *ClaimTrie) SpendClaim(name []byte, op wire.OutPoint, id change.ClaimID) error {
chg := change.Change{
Type: change.SpendClaim,
Name: name,
OutPoint: op,
ClaimID: id,
}
return ct.forwardNodeChange(chg)
}
// AddSupport adds a Support to the ClaimTrie.
func (ct *ClaimTrie) AddSupport(name []byte, op wire.OutPoint, amt int64, id change.ClaimID) error {
chg := change.Change{
Type: change.AddSupport,
Name: name,
OutPoint: op,
Amount: amt,
ClaimID: id,
}
return ct.forwardNodeChange(chg)
}
// SpendSupport spends a Support in the ClaimTrie.
func (ct *ClaimTrie) SpendSupport(name []byte, op wire.OutPoint, id change.ClaimID) error {
chg := change.Change{
Type: change.SpendSupport,
Name: name,
OutPoint: op,
ClaimID: id,
}
return ct.forwardNodeChange(chg)
}
// AppendBlock increases the block height by one.
func (ct *ClaimTrie) AppendBlock(temporary bool) error {
ct.height++
names, err := ct.nodeManager.IncrementHeightTo(ct.height, temporary)
if err != nil {
return errors.Wrap(err, "node manager increment")
}
expirations, err := ct.temporalRepo.NodesAt(ct.height)
if err != nil {
return errors.Wrap(err, "temporal repo get")
}
names = removeDuplicates(names) // comes out sorted
updateNames := make([][]byte, 0, len(names)+len(expirations))
updateHeights := make([]int32, 0, len(names)+len(expirations))
updateNames = append(updateNames, names...)
for range names { // log to the db that we updated a name at this height for rollback purposes
updateHeights = append(updateHeights, ct.height)
}
names = append(names, expirations...)
names = removeDuplicates(names)
nhns := ct.makeNameHashNext(names, false, nil)
for nhn := range nhns {
ct.merkleTrie.Update(nhn.Name, nhn.Hash, true)
if nhn.Next <= 0 {
continue
}
newName := normalization.NormalizeIfNecessary(nhn.Name, nhn.Next)
updateNames = append(updateNames, newName)
updateHeights = append(updateHeights, nhn.Next)
}
if !temporary && len(updateNames) > 0 {
err = ct.temporalRepo.SetNodesAt(updateNames, updateHeights)
if err != nil {
return errors.Wrap(err, "temporal repo set")
}
}
hitFork := ct.updateTrieForHashForkIfNecessary()
h := ct.MerkleHash()
if !temporary {
ct.blockRepo.Set(ct.height, h)
}
if hitFork {
err = ct.merkleTrie.SetRoot(h) // for clearing the memory entirely
}
return errors.Wrap(err, "merkle trie clear memory")
}
func (ct *ClaimTrie) updateTrieForHashForkIfNecessary() bool {
if ct.height != param.ActiveParams.AllClaimsInMerkleForkHeight {
return false
}
node.LogOnce(fmt.Sprintf("Rebuilding all trie nodes for the hash fork at %d...", ct.height))
ct.runFullTrieRebuild(nil, nil) // I don't think it's safe to allow interrupt during fork
return true
}
func removeDuplicates(names [][]byte) [][]byte { // this might be too expensive; we'll have to profile it
sort.Slice(names, func(i, j int) bool { // put names in order so we can skip duplicates
return bytes.Compare(names[i], names[j]) < 0
})
for i := len(names) - 2; i >= 0; i-- {
if bytes.Equal(names[i], names[i+1]) {
names = append(names[:i], names[i+1:]...)
}
}
return names
}
// ResetHeight resets the ClaimTrie to a previous known height.
func (ct *ClaimTrie) ResetHeight(height int32) error {
names := make([][]byte, 0)
for h := height + 1; h <= ct.height; h++ {
results, err := ct.temporalRepo.NodesAt(h)
if err != nil {
return err
}
names = append(names, results...)
}
names, err := ct.nodeManager.DecrementHeightTo(names, height)
if err != nil {
return err
}
passedHashFork := ct.height >= param.ActiveParams.AllClaimsInMerkleForkHeight && height < param.ActiveParams.AllClaimsInMerkleForkHeight
hash, err := ct.blockRepo.Get(height)
if err != nil {
return err
}
oldHeight := ct.height
ct.height = height // keep this before the rebuild
if passedHashFork {
names = nil // force them to reconsider all names
}
err = ct.merkleTrie.SetRoot(hash)
if err == merkletrie.ErrFullRebuildRequired {
ct.runFullTrieRebuild(names, nil)
}
if !ct.MerkleHash().IsEqual(hash) {
return errors.Errorf("unable to restore the hash at height %d", height)
}
return errors.WithStack(ct.blockRepo.Delete(height+1, oldHeight))
}
func (ct *ClaimTrie) runFullTrieRebuild(names [][]byte, interrupt <-chan struct{}) {
var nhns chan NameHashNext
if names == nil {
node.LogOnce("Building the entire claim trie in RAM...")
nhns = ct.makeNameHashNext(nil, true, interrupt)
} else {
nhns = ct.makeNameHashNext(names, false, interrupt)
}
for nhn := range nhns {
ct.merkleTrie.Update(nhn.Name, nhn.Hash, false)
}
}
// MerkleHash returns the Merkle Hash of the claimTrie.
func (ct *ClaimTrie) MerkleHash() *chainhash.Hash {
if ct.height >= param.ActiveParams.AllClaimsInMerkleForkHeight {
return ct.merkleTrie.MerkleHashAllClaims()
}
return ct.merkleTrie.MerkleHash()
}
// Height returns the current block height.
func (ct *ClaimTrie) Height() int32 {
return ct.height
}
// Close persists state.
// Any call to the ClaimTrie after Close() results in undefined behaviour.
func (ct *ClaimTrie) Close() {
for i := len(ct.cleanups) - 1; i >= 0; i-- {
cleanup := ct.cleanups[i]
err := cleanup()
if err != nil { // it would be better to cleanup what we can than exit early
node.LogOnce("On cleanup: " + err.Error())
}
}
ct.cleanups = nil
}
func (ct *ClaimTrie) forwardNodeChange(chg change.Change) error {
chg.Height = ct.Height() + 1
ct.nodeManager.AppendChange(chg)
return nil
}
func (ct *ClaimTrie) NodeAt(height int32, name []byte) (*node.Node, error) {
return ct.nodeManager.NodeAt(height, name)
}
func (ct *ClaimTrie) NamesChangedInBlock(height int32) ([]string, error) {
hits, err := ct.temporalRepo.NodesAt(height)
r := make([]string, len(hits))
for i := range hits {
r[i] = string(hits[i])
}
return r, err
}
func (ct *ClaimTrie) FlushToDisk() {
// maybe the user can fix the file lock shown in the warning before they shut down
if err := ct.nodeManager.Flush(); err != nil {
node.Warn("During nodeManager flush: " + err.Error())
}
if err := ct.temporalRepo.Flush(); err != nil {
node.Warn("During temporalRepo flush: " + err.Error())
}
if err := ct.merkleTrie.Flush(); err != nil {
node.Warn("During merkleTrie flush: " + err.Error())
}
if err := ct.blockRepo.Flush(); err != nil {
node.Warn("During blockRepo flush: " + err.Error())
}
}
type NameHashNext struct {
Name []byte
Hash *chainhash.Hash
Next int32
}
func interruptRequested(interrupted <-chan struct{}) bool {
select {
case <-interrupted: // should never block on nil
return true
default:
}
return false
}
func (ct *ClaimTrie) makeNameHashNext(names [][]byte, all bool, interrupt <-chan struct{}) chan NameHashNext {
inputs := make(chan []byte, 512)
outputs := make(chan NameHashNext, 512)
var wg sync.WaitGroup
hashComputationWorker := func() {
for name := range inputs {
hash, next := ct.nodeManager.Hash(name)
outputs <- NameHashNext{name, hash, next}
}
wg.Done()
}
threads := int(0.8 * float32(runtime.NumCPU()))
if threads < 1 {
threads = 1
}
for threads > 0 {
threads--
wg.Add(1)
go hashComputationWorker()
}
go func() {
if all {
ct.nodeManager.IterateNames(func(name []byte) bool {
if interruptRequested(interrupt) {
return false
}
clone := make([]byte, len(name))
copy(clone, name) // iteration name buffer is reused on future loops
inputs <- clone
return true
})
} else {
for _, name := range names {
if interruptRequested(interrupt) {
break
}
inputs <- name
}
}
close(inputs)
}()
go func() {
wg.Wait()
close(outputs)
}()
return outputs
}
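End to end, the API above composes as in this sketch (not part of the commit; the data dir, outpoint, and amount are illustrative, and network params are set first, as cmd/root.go below does):

package main

import (
	"fmt"

	"github.com/lbryio/lbcd/chaincfg/chainhash"
	"github.com/lbryio/lbcd/claimtrie"
	"github.com/lbryio/lbcd/claimtrie/change"
	"github.com/lbryio/lbcd/claimtrie/config"
	"github.com/lbryio/lbcd/claimtrie/param"
	"github.com/lbryio/lbcd/wire"
)

func main() {
	param.SetNetwork(wire.TestNet) // regtest, per cmd/root.go

	cfg := config.DefaultConfig
	cfg.DataDir = "/tmp/claimtrie_example" // illustrative
	ct, err := claimtrie.New(cfg)
	if err != nil {
		panic(err)
	}
	defer ct.Close()

	// Queue a claim; it takes effect when the next block is appended.
	op := *wire.NewOutPoint(&chainhash.Hash{1}, 0)
	if err := ct.AddClaim([]byte("example"), op, change.NewClaimID(op), 100000); err != nil {
		panic(err)
	}
	if err := ct.AppendBlock(false); err != nil {
		panic(err)
	}
	fmt.Println(ct.Height(), ct.MerkleHash()) // 1 and the new root hash
}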

claimtrie/claimtrie_test.go

(diff suppressed: file too large)

claimtrie/cmd/cmd/block.go

@@ -0,0 +1,98 @@
package cmd
import (
"fmt"
"github.com/cockroachdb/errors"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(NewBlockCommands())
}
func NewBlockCommands() *cobra.Command {
cmd := &cobra.Command{
Use: "block",
Short: "Block related commands",
}
cmd.AddCommand(NewBlockBestCommand())
cmd.AddCommand(NewBlockListCommand())
return cmd
}
func NewBlockBestCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "best",
Short: "Show the height and hash of the best block",
RunE: func(cmd *cobra.Command, args []string) error {
db, err := loadBlocksDB()
if err != nil {
return errors.Wrapf(err, "load blocks database")
}
defer db.Close()
chain, err := loadChain(db)
if err != nil {
return errors.Wrapf(err, "load chain")
}
state := chain.BestSnapshot()
fmt.Printf("Block %7d: %s\n", state.Height, state.Hash.String())
return nil
},
}
return cmd
}
func NewBlockListCommand() *cobra.Command {
var fromHeight int32
var toHeight int32
cmd := &cobra.Command{
Use: "list",
Short: "List merkle hash of blocks between <from_height> <to_height>",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
db, err := loadBlocksDB()
if err != nil {
return errors.Wrapf(err, "load blocks database")
}
defer db.Close()
chain, err := loadChain(db)
if err != nil {
return errors.Wrapf(err, "load chain")
}
if toHeight > chain.BestSnapshot().Height {
toHeight = chain.BestSnapshot().Height
}
for ht := fromHeight; ht <= toHeight; ht++ {
hash, err := chain.BlockHashByHeight(ht)
if err != nil {
return errors.Wrapf(err, "load hash for %d", ht)
}
fmt.Printf("Block %7d: %s\n", ht, hash.String())
}
return nil
},
}
cmd.Flags().Int32Var(&fromHeight, "from", 0, "From height (inclusive)")
cmd.Flags().Int32Var(&toHeight, "to", 0, "To height (inclusive)")
cmd.Flags().SortFlags = false
return cmd
}

claimtrie/cmd/cmd/chain.go

@@ -0,0 +1,441 @@
package cmd
import (
"os"
"path/filepath"
"sync"
"time"
"github.com/lbryio/lbcd/blockchain"
"github.com/lbryio/lbcd/claimtrie"
"github.com/lbryio/lbcd/claimtrie/chain"
"github.com/lbryio/lbcd/claimtrie/chain/chainrepo"
"github.com/lbryio/lbcd/claimtrie/change"
"github.com/lbryio/lbcd/claimtrie/config"
"github.com/lbryio/lbcd/database"
_ "github.com/lbryio/lbcd/database/ffldb"
"github.com/lbryio/lbcd/txscript"
"github.com/lbryio/lbcd/wire"
btcutil "github.com/lbryio/lbcutil"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/pebble"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(NewChainCommands())
}
func NewChainCommands() *cobra.Command {
cmd := &cobra.Command{
Use: "chain",
Short: "Chain related commands",
}
cmd.AddCommand(NewChainDumpCommand())
cmd.AddCommand(NewChainReplayCommand())
cmd.AddCommand(NewChainConvertCommand())
return cmd
}
func NewChainDumpCommand() *cobra.Command {
var chainRepoPath string
var fromHeight int32
var toHeight int32
cmd := &cobra.Command{
Use: "dump",
Short: "Dump the chain changes between <fromHeight> and <toHeight>",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
dbPath := chainRepoPath
log.Debugf("Open chain repo: %q", dbPath)
chainRepo, err := chainrepo.NewPebble(dbPath)
if err != nil {
return errors.Wrapf(err, "open chain repo")
}
for height := fromHeight; height <= toHeight; height++ {
changes, err := chainRepo.Load(height)
if errors.Is(err, pebble.ErrNotFound) {
continue
}
if err != nil {
return errors.Wrapf(err, "load changes for height: %d", height)
}
for _, chg := range changes {
showChange(chg)
}
}
return nil
},
}
cmd.Flags().StringVar(&chainRepoPath, "chaindb", "chain_db", "Claim operation database")
cmd.Flags().Int32Var(&fromHeight, "from", 0, "From height (inclusive)")
cmd.Flags().Int32Var(&toHeight, "to", 0, "To height (inclusive)")
cmd.Flags().SortFlags = false
return cmd
}
func NewChainReplayCommand() *cobra.Command {
var chainRepoPath string
var fromHeight int32
var toHeight int32
cmd := &cobra.Command{
Use: "replay",
Short: "Replay the chain changes between <fromHeight> and <toHeight>",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
for _, dbName := range []string{
cfg.BlockRepoPebble.Path,
cfg.NodeRepoPebble.Path,
cfg.MerkleTrieRepoPebble.Path,
cfg.TemporalRepoPebble.Path,
} {
dbPath := filepath.Join(dataDir, netName, "claim_dbs", dbName)
log.Debugf("Delete repo: %q", dbPath)
err := os.RemoveAll(dbPath)
if err != nil {
return errors.Wrapf(err, "delete repo: %q", dbPath)
}
}
log.Debugf("Open chain repo: %q", chainRepoPath)
chainRepo, err := chainrepo.NewPebble(chainRepoPath)
if err != nil {
return errors.Wrapf(err, "open chain repo")
}
cfg := config.DefaultConfig
cfg.RamTrie = true
cfg.DataDir = filepath.Join(dataDir, netName)
ct, err := claimtrie.New(cfg)
if err != nil {
return errors.Wrapf(err, "create claimtrie")
}
defer ct.Close()
db, err := loadBlocksDB()
if err != nil {
return errors.Wrapf(err, "load blocks database")
}
chain, err := loadChain(db)
if err != nil {
return errors.Wrapf(err, "load chain")
}
startTime := time.Now()
for ht := fromHeight; ht < toHeight; ht++ {
changes, err := chainRepo.Load(ht + 1)
if errors.Is(err, pebble.ErrNotFound) {
// do nothing.
} else if err != nil {
return errors.Wrapf(err, "load changes for block %d", ht)
}
for _, chg := range changes {
switch chg.Type {
case change.AddClaim:
err = ct.AddClaim(chg.Name, chg.OutPoint, chg.ClaimID, chg.Amount)
case change.UpdateClaim:
err = ct.UpdateClaim(chg.Name, chg.OutPoint, chg.Amount, chg.ClaimID)
case change.SpendClaim:
err = ct.SpendClaim(chg.Name, chg.OutPoint, chg.ClaimID)
case change.AddSupport:
err = ct.AddSupport(chg.Name, chg.OutPoint, chg.Amount, chg.ClaimID)
case change.SpendSupport:
err = ct.SpendSupport(chg.Name, chg.OutPoint, chg.ClaimID)
default:
err = errors.Errorf("invalid change type: %v", chg)
}
if err != nil {
return errors.Wrapf(err, "execute change %v", chg)
}
}
err = appendBlock(ct, chain)
if err != nil {
return errors.Wrapf(err, "appendBlock")
}
if time.Since(startTime) > 5*time.Second {
log.Infof("Block: %d", ct.Height())
startTime = time.Now()
}
}
return nil
},
}
cmd.Flags().StringVar(&chainRepoPath, "chaindb", "chain_db", "Claim operation database")
cmd.Flags().Int32Var(&fromHeight, "from", 0, "From height")
cmd.Flags().Int32Var(&toHeight, "to", 0, "To height")
cmd.Flags().SortFlags = false
return cmd
}
func appendBlock(ct *claimtrie.ClaimTrie, chain *blockchain.BlockChain) error {
err := ct.AppendBlock(false)
if err != nil {
return errors.Wrapf(err, "append block")
}
blockHash, err := chain.BlockHashByHeight(ct.Height())
if err != nil {
return errors.Wrapf(err, "load block hash")
}
header, err := chain.HeaderByHash(blockHash)
if err != nil {
return errors.Wrapf(err, "load block header")
}
if *ct.MerkleHash() != header.ClaimTrie {
return errors.Errorf("hash mismatched at height %5d: exp: %s, got: %s",
ct.Height(), header.ClaimTrie, ct.MerkleHash())
}
return nil
}
func NewChainConvertCommand() *cobra.Command {
var chainRepoPath string
var toHeight int32
cmd := &cobra.Command{
Use: "convert",
Short: "Convert changes from 0 to <toHeight>",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
db, err := loadBlocksDB()
if err != nil {
return errors.Wrapf(err, "load block db")
}
defer db.Close()
chain, err := loadChain(db)
if err != nil {
return errors.Wrapf(err, "load chain")
}
if toHeight > chain.BestSnapshot().Height {
toHeight = chain.BestSnapshot().Height
}
chainRepo, err := chainrepo.NewPebble(chainRepoPath)
if err != nil {
return errors.Wrapf(err, "open chain repo")
}
defer chainRepo.Close()
converter := chainConverter{
db: db,
chain: chain,
chainRepo: chainRepo,
toHeight: toHeight,
blockChan: make(chan *btcutil.Block, 1000),
changesChan: make(chan []change.Change, 1000),
wg: &sync.WaitGroup{},
stat: &stat{},
}
startTime := time.Now()
err = converter.start()
if err != nil {
return errors.Wrapf(err, "start Converter")
}
converter.wait()
log.Infof("Convert chain: took %s", time.Since(startTime))
return nil
},
}
cmd.Flags().StringVar(&chainRepoPath, "chaindb", "chain_db", "Claim operation database")
cmd.Flags().Int32Var(&toHeight, "to", 0, "toHeight")
cmd.Flags().SortFlags = false
return cmd
}
type stat struct {
blocksFetched int
blocksProcessed int
changesSaved int
}
type chainConverter struct {
db database.DB
chain *blockchain.BlockChain
chainRepo chain.Repo
toHeight int32
blockChan chan *btcutil.Block
changesChan chan []change.Change
wg *sync.WaitGroup
stat *stat
}
func (cc *chainConverter) wait() {
cc.wg.Wait()
}
func (cb *chainConverter) start() error {
go cb.reportStats()
cb.wg.Add(3)
go cb.getBlock()
go cb.processBlock()
go cb.saveChanges()
return nil
}
func (cb *chainConverter) getBlock() {
defer cb.wg.Done()
defer close(cb.blockChan)
for ht := int32(0); ht < cb.toHeight; ht++ {
block, err := cb.chain.BlockByHeight(ht)
if err != nil {
if errors.Cause(err).Error() == "too many open files" {
err = errors.WithHintf(err, "try ulimit -n 2048")
}
log.Errorf("load changes at %d: %s", ht, err)
return
}
cb.stat.blocksFetched++
cb.blockChan <- block
}
}
func (cb *chainConverter) processBlock() {
defer cb.wg.Done()
defer close(cb.changesChan)
utxoPubScripts := map[wire.OutPoint][]byte{}
for block := range cb.blockChan {
var changes []change.Change
for _, tx := range block.Transactions() {
if blockchain.IsCoinBase(tx) {
continue
}
for _, txIn := range tx.MsgTx().TxIn {
prevOutpoint := txIn.PreviousOutPoint
pkScript := utxoPubScripts[prevOutpoint]
cs, err := txscript.ExtractClaimScript(pkScript)
if txscript.IsErrorCode(err, txscript.ErrNotClaimScript) {
continue
}
if err != nil {
log.Criticalf("Can't parse claim script: %s", err)
}
chg := change.Change{
Height: block.Height(),
Name: cs.Name,
OutPoint: txIn.PreviousOutPoint,
}
delete(utxoPubScripts, prevOutpoint)
switch cs.Opcode {
case txscript.OP_CLAIMNAME:
chg.Type = change.SpendClaim
chg.ClaimID = change.NewClaimID(chg.OutPoint)
case txscript.OP_UPDATECLAIM:
chg.Type = change.SpendClaim
copy(chg.ClaimID[:], cs.ClaimID)
case txscript.OP_SUPPORTCLAIM:
chg.Type = change.SpendSupport
copy(chg.ClaimID[:], cs.ClaimID)
}
changes = append(changes, chg)
}
op := *wire.NewOutPoint(tx.Hash(), 0)
for i, txOut := range tx.MsgTx().TxOut {
cs, err := txscript.ExtractClaimScript(txOut.PkScript)
if txscript.IsErrorCode(err, txscript.ErrNotClaimScript) {
continue
}
op.Index = uint32(i)
chg := change.Change{
Height: block.Height(),
Name: cs.Name,
OutPoint: op,
Amount: txOut.Value,
}
utxoPubScripts[op] = txOut.PkScript
switch cs.Opcode {
case txscript.OP_CLAIMNAME:
chg.Type = change.AddClaim
chg.ClaimID = change.NewClaimID(op)
case txscript.OP_SUPPORTCLAIM:
chg.Type = change.AddSupport
copy(chg.ClaimID[:], cs.ClaimID)
case txscript.OP_UPDATECLAIM:
chg.Type = change.UpdateClaim
copy(chg.ClaimID[:], cs.ClaimID)
}
changes = append(changes, chg)
}
}
cb.stat.blocksProcessed++
if len(changes) != 0 {
cb.changesChan <- changes
}
}
}
func (cb *chainConverter) saveChanges() {
defer cb.wg.Done()
for changes := range cb.changesChan {
err := cb.chainRepo.Save(changes[0].Height, changes)
if err != nil {
log.Errorf("save to chain repo: %s", err)
return
}
cb.stat.changesSaved++
}
}
func (cb *chainConverter) reportStats() {
stat := cb.stat
tick := time.NewTicker(5 * time.Second)
for range tick.C {
log.Infof("block : %7d / %7d, changes saved: %d",
stat.blocksFetched, stat.blocksProcessed, stat.changesSaved)
}
}

@@ -0,0 +1,62 @@
package cmd
import (
"path/filepath"
"time"
"github.com/lbryio/lbcd/blockchain"
"github.com/lbryio/lbcd/chaincfg"
"github.com/lbryio/lbcd/database"
"github.com/lbryio/lbcd/txscript"
"github.com/cockroachdb/errors"
)
func loadBlocksDB() (database.DB, error) {
dbPath := filepath.Join(dataDir, netName, "blocks_ffldb")
log.Infof("Loading blocks database: %s", dbPath)
db, err := database.Open("ffldb", dbPath, chainParams().Net)
if err != nil {
return nil, errors.Wrapf(err, "open blocks database")
}
return db, nil
}
func loadChain(db database.DB) (*blockchain.BlockChain, error) {
paramsCopy := chaincfg.MainNetParams
log.Infof("Loading chain from database")
startTime := time.Now()
chain, err := blockchain.New(&blockchain.Config{
DB: db,
ChainParams: &paramsCopy,
TimeSource: blockchain.NewMedianTime(),
SigCache: txscript.NewSigCache(1000),
})
if err != nil {
return nil, errors.Wrapf(err, "create blockchain")
}
log.Infof("Loaded chain from database (%s)", time.Since(startTime))
return chain, err
}
func chainParams() chaincfg.Params {
// Make a copy so the user won't modify the global instance.
params := chaincfg.MainNetParams
switch netName {
case "mainnet":
params = chaincfg.MainNetParams
case "testnet":
params = chaincfg.TestNet3Params
case "regtest":
params = chaincfg.RegressionNetParams
}
return params
}

claimtrie/cmd/cmd/trie.go

@@ -0,0 +1,105 @@
package cmd
import (
"fmt"
"path/filepath"
"github.com/lbryio/lbcd/claimtrie/merkletrie"
"github.com/lbryio/lbcd/claimtrie/merkletrie/merkletrierepo"
"github.com/lbryio/lbcd/claimtrie/temporal/temporalrepo"
"github.com/cockroachdb/errors"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(NewTrieCommands())
}
func NewTrieCommands() *cobra.Command {
cmd := &cobra.Command{
Use: "trie",
Short: "MerkleTrie related commands",
}
cmd.AddCommand(NewTrieNameCommand())
return cmd
}
func NewTrieNameCommand() *cobra.Command {
var height int32
var name string
cmd := &cobra.Command{
Use: "name",
Short: "List the claim and child hashes at the named trie vertex for the block at the given height",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
db, err := loadBlocksDB()
if err != nil {
return errors.Wrapf(err, "load blocks database")
}
defer db.Close()
chain, err := loadChain(db)
if err != nil {
return errors.Wrapf(err, "load chain")
}
state := chain.BestSnapshot()
fmt.Printf("Block %7d: %s\n", state.Height, state.Hash.String())
if height > state.Height {
return errors.New("requested height is unavailable")
}
hash := state.Hash
dbPath := filepath.Join(dataDir, netName, "claim_dbs", cfg.MerkleTrieRepoPebble.Path)
log.Debugf("Open merkletrie repo: %q", dbPath)
trieRepo, err := merkletrierepo.NewPebble(dbPath)
if err != nil {
return errors.Wrapf(err, "open merkle trie repo")
}
trie := merkletrie.NewPersistentTrie(trieRepo)
defer trie.Close()
trie.SetRoot(&hash)
if len(name) > 1 {
trie.Dump(name)
return nil
}
dbPath = filepath.Join(dataDir, netName, "claim_dbs", cfg.TemporalRepoPebble.Path)
log.Debugf("Open temporal repo: %q", dbPath)
tmpRepo, err := temporalrepo.NewPebble(dbPath)
if err != nil {
return errors.Wrapf(err, "open temporal repo")
}
nodes, err := tmpRepo.NodesAt(height)
if err != nil {
return errors.Wrapf(err, "read temporal repo at %d", height)
}
for _, name := range nodes {
fmt.Printf("Name: %s, ", string(name))
trie.Dump(string(name))
}
return nil
},
}
cmd.Flags().Int32Var(&height, "height", 0, "Height")
cmd.Flags().StringVar(&name, "name", "", "Name")
cmd.Flags().SortFlags = false
return cmd
}

claimtrie/cmd/cmd/node.go

@@ -0,0 +1,194 @@
package cmd
import (
"encoding/hex"
"fmt"
"math"
"path/filepath"
"github.com/cockroachdb/errors"
"github.com/lbryio/lbcd/claimtrie/change"
"github.com/lbryio/lbcd/claimtrie/node"
"github.com/lbryio/lbcd/claimtrie/node/noderepo"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(NewNodeCommands())
}
func NewNodeCommands() *cobra.Command {
cmd := &cobra.Command{
Use: "node",
Short: "Node related commands",
}
cmd.AddCommand(NewNodeDumpCommand())
cmd.AddCommand(NewNodeReplayCommand())
cmd.AddCommand(NewNodeChildrenCommand())
cmd.AddCommand(NewNodeStatsCommand())
return cmd
}
func NewNodeDumpCommand() *cobra.Command {
var name string
var height int32
cmd := &cobra.Command{
Use: "dump",
Short: "Show the recorded changes for a node up to a certain height",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
dbPath := filepath.Join(dataDir, netName, "claim_dbs", cfg.NodeRepoPebble.Path)
log.Debugf("Open node repo: %q", dbPath)
repo, err := noderepo.NewPebble(dbPath)
if err != nil {
return errors.Wrapf(err, "open node repo")
}
defer repo.Close()
changes, err := repo.LoadChanges([]byte(name))
if err != nil {
return errors.Wrapf(err, "load commands")
}
for _, chg := range changes {
if chg.Height > height {
break
}
showChange(chg)
}
return nil
},
}
cmd.Flags().StringVar(&name, "name", "", "Name")
cmd.MarkFlagRequired("name")
cmd.Flags().Int32Var(&height, "height", math.MaxInt32, "Height")
return cmd
}
func NewNodeReplayCommand() *cobra.Command {
var name string
var height int32
cmd := &cobra.Command{
Use: "replay",
Short: "Replay the changes of <name> up to <height>",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
dbPath := filepath.Join(dataDir, netName, "claim_dbs", cfg.NodeRepoPebble.Path)
log.Debugf("Open node repo: %q", dbPath)
repo, err := noderepo.NewPebble(dbPath)
if err != nil {
return errors.Wrapf(err, "open node repo")
}
bm, err := node.NewBaseManager(repo)
if err != nil {
return errors.Wrapf(err, "create node manager")
}
defer bm.Close()
nm := node.NewNormalizingManager(bm)
n, err := nm.NodeAt(height, []byte(name))
if err != nil || n == nil {
return errors.Wrapf(err, "get node: %s", name)
}
showNode(n)
return nil
},
}
cmd.Flags().StringVar(&name, "name", "", "Name")
cmd.MarkFlagRequired("name")
cmd.Flags().Int32Var(&height, "height", 0, "Height (inclusive)")
cmd.Flags().SortFlags = false
return cmd
}
func NewNodeChildrenCommand() *cobra.Command {
var name string
cmd := &cobra.Command{
Use: "children",
Short: "Show all the children names of a given node name",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
dbPath := filepath.Join(dataDir, netName, "claim_dbs", cfg.NodeRepoPebble.Path)
log.Debugf("Open node repo: %q", dbPath)
repo, err := noderepo.NewPebble(dbPath)
if err != nil {
return errors.Wrapf(err, "open node repo")
}
defer repo.Close()
fn := func(changes []change.Change) bool {
fmt.Printf("Name: %s, Height: %d, %d\n", changes[0].Name, changes[0].Height,
changes[len(changes)-1].Height)
return true
}
err = repo.IterateChildren([]byte(name), fn)
if err != nil {
return errors.Wrapf(err, "iterate children: %s", name)
}
return nil
},
}
cmd.Flags().StringVar(&name, "name", "", "Name")
cmd.MarkFlagRequired("name")
return cmd
}
func NewNodeStatsCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "stat",
Short: "Determine the number of unique names, average changes per name, etc.",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
dbPath := filepath.Join(dataDir, netName, "claim_dbs", cfg.NodeRepoPebble.Path)
log.Debugf("Open node repo: %q", dbPath)
repo, err := noderepo.NewPebble(dbPath)
if err != nil {
return errors.Wrapf(err, "open node repo")
}
defer repo.Close()
n := 0
c := 0
err = repo.IterateChildren([]byte{}, func(changes []change.Change) bool {
c += len(changes)
n++
if len(changes) > 5000 {
fmt.Printf("Name: %s, Hex: %s, Changes: %d\n", string(changes[0].Name),
hex.EncodeToString(changes[0].Name), len(changes))
}
return true
})
fmt.Printf("\nNames: %d, Average changes: %.2f\n", n, float64(c)/float64(n))
return errors.Wrapf(err, "iterate node repo")
},
}
return cmd
}

claimtrie/cmd/cmd/root.go

@@ -0,0 +1,61 @@
package cmd
import (
"os"
"github.com/btcsuite/btclog"
"github.com/lbryio/lbcd/claimtrie/config"
"github.com/lbryio/lbcd/claimtrie/param"
"github.com/lbryio/lbcd/limits"
"github.com/lbryio/lbcd/wire"
"github.com/spf13/cobra"
)
var (
log btclog.Logger
cfg = config.DefaultConfig
netName string
dataDir string
)
var rootCmd = NewRootCommand()
func NewRootCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "claimtrie",
Short: "ClaimTrie Command Line Interface",
SilenceUsage: true,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
switch netName {
case "mainnet":
param.SetNetwork(wire.MainNet)
case "testnet":
param.SetNetwork(wire.TestNet3)
case "regtest":
param.SetNetwork(wire.TestNet)
}
},
}
cmd.PersistentFlags().StringVar(&netName, "netname", "mainnet", "Net name")
cmd.PersistentFlags().StringVarP(&dataDir, "datadir", "b", cfg.DataDir, "Data dir")
return cmd
}
func Execute() {
backendLogger := btclog.NewBackend(os.Stdout)
defer os.Stdout.Sync()
log = backendLogger.Logger("CMDL")
log.SetLevel(btclog.LevelDebug)
// Up some limits.
if err := limits.SetLimits(); err != nil {
log.Errorf("failed to set limits: %v\n", err)
}
rootCmd.Execute() // nolint : errchk
}

claimtrie/cmd/cmd/temporal.go

@@ -0,0 +1,60 @@
package cmd
import (
"path/filepath"
"github.com/lbryio/lbcd/claimtrie/temporal/temporalrepo"
"github.com/cockroachdb/errors"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(NewTemporalCommand())
}
func NewTemporalCommand() *cobra.Command {
var fromHeight int32
var toHeight int32
cmd := &cobra.Command{
Use: "temporal",
Short: "List which nodes are updated in a range of heights",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
dbPath := filepath.Join(dataDir, netName, "claim_dbs", cfg.TemporalRepoPebble.Path)
log.Debugf("Open temporal repo: %s", dbPath)
repo, err := temporalrepo.NewPebble(dbPath)
if err != nil {
return errors.Wrapf(err, "open temporal repo")
}
if toHeight <= 0 {
toHeight = fromHeight
}
for ht := fromHeight; ht <= toHeight; ht++ {
names, err := repo.NodesAt(ht)
if err != nil {
return errors.Wrapf(err, "get node names from temporal")
}
if len(names) == 0 {
continue
}
showTemporalNames(ht, names)
}
return nil
},
}
cmd.Flags().Int32Var(&fromHeight, "from", 0, "From height (inclusive)")
cmd.Flags().Int32Var(&toHeight, "to", 0, "To height (inclusive)")
cmd.Flags().SortFlags = false
return cmd
}

claimtrie/cmd/cmd/ui.go

@@ -0,0 +1,76 @@
package cmd
import (
"fmt"
"strings"
"github.com/lbryio/lbcd/claimtrie/change"
"github.com/lbryio/lbcd/claimtrie/node"
)
var status = map[node.Status]string{
node.Accepted: "Accepted",
node.Activated: "Activated",
node.Deactivated: "Deactivated",
}
func changeType(c change.ChangeType) string {
switch c {
case change.AddClaim:
return "AddClaim"
case change.SpendClaim:
return "SpendClaim"
case change.UpdateClaim:
return "UpdateClaim"
case change.AddSupport:
return "AddSupport"
case change.SpendSupport:
return "SpendSupport"
}
return "Unknown"
}
func showChange(chg change.Change) {
fmt.Printf(">>> Height: %6d: %s for %04s, %15d, %s - %s\n",
chg.Height, changeType(chg.Type), chg.ClaimID, chg.Amount, chg.OutPoint, chg.Name)
}
func showClaim(c *node.Claim, n *node.Node) {
mark := " "
if c == n.BestClaim {
mark = "*"
}
fmt.Printf("%s C ID: %s, TXO: %s\n %5d/%-5d, Status: %9s, Amount: %15d, Support Amount: %15d\n",
mark, c.ClaimID, c.OutPoint, c.AcceptedAt, c.ActiveAt, status[c.Status], c.Amount, n.SupportSums[c.ClaimID.Key()])
}
func showSupport(c *node.Claim) {
fmt.Printf(" S id: %s, op: %s, %5d/%-5d, %9s, amt: %15d\n",
c.ClaimID, c.OutPoint, c.AcceptedAt, c.ActiveAt, status[c.Status], c.Amount)
}
func showNode(n *node.Node) {
fmt.Printf("%s\n", strings.Repeat("-", 200))
fmt.Printf("Last Node Takeover: %d\n\n", n.TakenOverAt)
n.SortClaimsByBid()
for _, c := range n.Claims {
showClaim(c, n)
for _, s := range n.Supports {
if s.ClaimID != c.ClaimID {
continue
}
showSupport(s)
}
}
fmt.Printf("\n\n")
}
func showTemporalNames(height int32, names [][]byte) {
fmt.Printf("%7d: %q", height, names[0])
for _, name := range names[1:] {
fmt.Printf(", %q ", name)
}
fmt.Printf("\n")
}

claimtrie/cmd/main.go

@@ -0,0 +1,9 @@
package main
import (
"github.com/lbryio/lbcd/claimtrie/cmd/cmd"
)
func main() {
cmd.Execute()
}

claimtrie/config/config.go

@@ -0,0 +1,49 @@
package config
import (
"path/filepath"
"github.com/lbryio/lbcd/claimtrie/param"
btcutil "github.com/lbryio/lbcutil"
)
var DefaultConfig = Config{
Params: param.MainNet,
RamTrie: true, // as it stands the other trie uses more RAM, more time, and 40GB+ of disk space
DataDir: filepath.Join(btcutil.AppDataDir("chain", false), "data"),
BlockRepoPebble: pebbleConfig{
Path: "blocks_pebble_db",
},
NodeRepoPebble: pebbleConfig{
Path: "node_change_pebble_db",
},
TemporalRepoPebble: pebbleConfig{
Path: "temporal_pebble_db",
},
MerkleTrieRepoPebble: pebbleConfig{
Path: "merkletrie_pebble_db",
},
}
// Config is the container of all configurations.
type Config struct {
Params param.ClaimTrieParams
RamTrie bool
DataDir string
BlockRepoPebble pebbleConfig
NodeRepoPebble pebbleConfig
TemporalRepoPebble pebbleConfig
MerkleTrieRepoPebble pebbleConfig
Interrupt <-chan struct{}
}
type pebbleConfig struct {
Path string
}

claimtrie/merkletrie/collapsedtrie.go

@@ -0,0 +1,235 @@
package merkletrie
import (
"github.com/lbryio/lbcd/chaincfg/chainhash"
)
type KeyType []byte
type collapsedVertex struct {
children []*collapsedVertex
key KeyType
merkleHash *chainhash.Hash
claimHash *chainhash.Hash
}
// insertAt inserts v into data at index i and returns the new slice.
// https://stackoverflow.com/questions/42746972/golang-insert-to-a-sorted-slice
func insertAt(data []*collapsedVertex, i int, v *collapsedVertex) []*collapsedVertex {
if i == len(data) {
// Insert at end is the easy case.
return append(data, v)
}
// Make space for the inserted element by shifting
// values at the insertion index up one index. The call
// to append does not allocate memory when cap(data) is
// greater than len(data).
data = append(data[:i+1], data[i:]...)
data[i] = v
return data
}
func (ptn *collapsedVertex) Insert(value *collapsedVertex) *collapsedVertex {
// keep it sorted (and sort.Sort is too slow)
index := sortSearch(ptn.children, value.key[0])
ptn.children = insertAt(ptn.children, index, value)
return value
}
// this sort.Search is stolen shamelessly from search.go,
// and modified for performance to not need a closure
func sortSearch(nodes []*collapsedVertex, b byte) int {
i, j := 0, len(nodes)
for i < j {
h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
if nodes[h].key[0] < b {
i = h + 1 // preserves f(i-1) == false
} else {
j = h // preserves f(j) == true
}
}
// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
return i
}
func (ptn *collapsedVertex) findNearest(key KeyType) (int, *collapsedVertex) {
// none of the children overlap on the first char or we would have a parent node with that char
index := sortSearch(ptn.children, key[0])
hits := ptn.children[index:]
if len(hits) > 0 {
return index, hits[0]
}
return -1, nil
}
type collapsedTrie struct {
Root *collapsedVertex
Nodes int
}
func NewCollapsedTrie() *collapsedTrie {
// we never delete the Root node
return &collapsedTrie{Root: &collapsedVertex{key: make(KeyType, 0)}, Nodes: 1}
}
func (pt *collapsedTrie) NodeCount() int {
return pt.Nodes
}
func matchLength(a, b KeyType) int {
minLen := len(a)
if len(b) < minLen {
minLen = len(b)
}
for i := 0; i < minLen; i++ {
if a[i] != b[i] {
return i
}
}
return minLen
}
func (pt *collapsedTrie) insert(value KeyType, node *collapsedVertex) (bool, *collapsedVertex) {
index, child := node.findNearest(value)
match := 0
if index >= 0 { // if we found a child
child.merkleHash = nil
match = matchLength(value, child.key)
if len(value) == match && len(child.key) == match {
return false, child
}
}
if match <= 0 {
pt.Nodes++
return true, node.Insert(&collapsedVertex{key: value})
}
if match < len(child.key) {
grandChild := collapsedVertex{key: child.key[match:], children: child.children,
claimHash: child.claimHash, merkleHash: child.merkleHash}
newChild := collapsedVertex{key: child.key[0:match], children: []*collapsedVertex{&grandChild}}
child = &newChild
node.children[index] = child
pt.Nodes++
if len(value) == match {
return true, child
}
}
return pt.insert(value[match:], child)
}
func (pt *collapsedTrie) InsertOrFind(value KeyType) (bool, *collapsedVertex) {
pt.Root.merkleHash = nil
if len(value) <= 0 {
return false, pt.Root
}
// we store the name so we need to make our own copy of it
// this avoids errors where this function is called via the DB iterator
v2 := make([]byte, len(value))
copy(v2, value)
return pt.insert(v2, pt.Root)
}
func find(value KeyType, node *collapsedVertex, pathIndexes *[]int, path *[]*collapsedVertex) *collapsedVertex {
index, child := node.findNearest(value)
if index < 0 {
return nil
}
match := matchLength(value, child.key)
if len(value) == match && len(child.key) == match {
if pathIndexes != nil {
*pathIndexes = append(*pathIndexes, index)
}
if path != nil {
*path = append(*path, child)
}
return child
}
if match < len(child.key) || match == len(value) {
return nil
}
if pathIndexes != nil {
*pathIndexes = append(*pathIndexes, index)
}
if path != nil {
*path = append(*path, child)
}
return find(value[match:], child, pathIndexes, path)
}
func (pt *collapsedTrie) Find(value KeyType) *collapsedVertex {
if len(value) <= 0 {
return pt.Root
}
return find(value, pt.Root, nil, nil)
}
func (pt *collapsedTrie) FindPath(value KeyType) ([]int, []*collapsedVertex) {
pathIndexes := []int{-1}
path := []*collapsedVertex{pt.Root}
if len(value) > 0 {
result := find(value, pt.Root, &pathIndexes, &path)
if result == nil { // not sure I want this line
return nil, nil
}
}
return pathIndexes, path
}
// IterateFrom can be used to find a value and run a function on that value.
// If the handler returns true it continues to iterate through the children of value.
func (pt *collapsedTrie) IterateFrom(start KeyType, handler func(name KeyType, value *collapsedVertex) bool) {
node := find(start, pt.Root, nil, nil)
if node == nil {
return
}
iterateFrom(start, node, handler)
}
func iterateFrom(name KeyType, node *collapsedVertex, handler func(name KeyType, value *collapsedVertex) bool) {
for handler(name, node) {
for _, child := range node.children {
iterateFrom(append(name, child.key...), child, handler)
}
}
}
func (pt *collapsedTrie) Erase(value KeyType) bool {
indexes, path := pt.FindPath(value)
if path == nil || len(path) <= 1 {
if len(path) == 1 {
path[0].merkleHash = nil
path[0].claimHash = nil
}
return false
}
nodes := pt.Nodes
i := len(path) - 1
path[i].claimHash = nil // this is the thing we are erasing; the rest is book-keeping
for ; i > 0; i-- {
childCount := len(path[i].children)
noClaimData := path[i].claimHash == nil
path[i].merkleHash = nil
if childCount == 1 && noClaimData {
path[i].key = append(path[i].key, path[i].children[0].key...)
path[i].claimHash = path[i].children[0].claimHash
path[i].children = path[i].children[0].children
pt.Nodes--
continue
}
if childCount == 0 && noClaimData {
index := indexes[i]
path[i-1].children = append(path[i-1].children[:index], path[i-1].children[index+1:]...)
pt.Nodes--
continue
}
break
}
for ; i >= 0; i-- {
path[i].merkleHash = nil
}
return nodes > pt.Nodes
}

claimtrie/merkletrie/collapsedtrie_test.go

@@ -0,0 +1,113 @@
package merkletrie
import (
"bytes"
"math/rand"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func b(value string) []byte { return []byte(value) }
func eq(x []byte, y string) bool { return bytes.Equal(x, b(y)) }
func TestInsertAndErase(t *testing.T) {
trie := NewCollapsedTrie()
assert.True(t, trie.NodeCount() == 1)
inserted, node := trie.InsertOrFind(b("abc"))
assert.True(t, inserted)
assert.NotNil(t, node)
assert.Equal(t, 2, trie.NodeCount())
inserted, node = trie.InsertOrFind(b("abd"))
assert.True(t, inserted)
assert.Equal(t, 4, trie.NodeCount())
assert.NotNil(t, node)
hit := trie.Find(b("ab"))
assert.True(t, eq(hit.key, "ab"))
assert.Equal(t, 2, len(hit.children))
hit = trie.Find(b("abc"))
assert.True(t, eq(hit.key, "c"))
hit = trie.Find(b("abd"))
assert.True(t, eq(hit.key, "d"))
hit = trie.Find(b("a"))
assert.Nil(t, hit)
indexes, path := trie.FindPath(b("abd"))
assert.Equal(t, 3, len(indexes))
assert.True(t, eq(path[1].key, "ab"))
erased := trie.Erase(b("ab"))
assert.False(t, erased)
assert.Equal(t, 4, trie.NodeCount())
erased = trie.Erase(b("abc"))
assert.True(t, erased)
assert.Equal(t, 2, trie.NodeCount())
erased = trie.Erase(b("abd"))
assert.True(t, erased)
assert.Equal(t, 1, trie.NodeCount())
}
func TestNilNameHandling(t *testing.T) {
trie := NewCollapsedTrie()
inserted, n := trie.InsertOrFind([]byte("test"))
assert.True(t, inserted)
n.claimHash = EmptyTrieHash
inserted, n = trie.InsertOrFind(nil)
assert.False(t, inserted)
n.claimHash = EmptyTrieHash
n.merkleHash = EmptyTrieHash
inserted, n = trie.InsertOrFind(nil)
assert.False(t, inserted)
assert.NotNil(t, n.claimHash)
assert.Nil(t, n.merkleHash)
nodeRemoved := trie.Erase(nil)
assert.False(t, nodeRemoved)
inserted, n = trie.InsertOrFind(nil)
assert.False(t, inserted)
assert.Nil(t, n.claimHash)
}
func TestCollapsedTriePerformance(t *testing.T) {
inserts := 100000 // increase this to 1M for more interesting results
data := make([][]byte, inserts)
rand.Seed(42)
for i := 0; i < inserts; i++ {
size := rand.Intn(70) + 4
data[i] = make([]byte, size)
rand.Read(data[i])
for j := 0; j < size; j++ {
data[i][j] %= byte(62) // shrink the range to match the old test
}
}
trie := NewCollapsedTrie()
// doing my own timing because I couldn't get the B.Run method to work:
start := time.Now()
for i := 0; i < inserts; i++ {
_, node := trie.InsertOrFind(data[i])
assert.NotNil(t, node, "Failure at %d of %d", i, inserts)
}
t.Logf("Insertion in %f sec.", time.Since(start).Seconds())
start = time.Now()
for i := 0; i < inserts; i++ {
node := trie.Find(data[i])
assert.True(t, bytes.HasSuffix(data[i], node.key), "Failure on %d of %d", i, inserts)
}
t.Logf("Lookup in %f sec. on %d nodes.", time.Since(start).Seconds(), trie.NodeCount())
start = time.Now()
for i := 0; i < inserts; i++ {
indexes, path := trie.FindPath(data[i])
assert.True(t, len(indexes) == len(path))
assert.True(t, len(path) > 1)
assert.True(t, bytes.HasSuffix(data[i], path[len(path)-1].key))
}
t.Logf("Parents in %f sec.", time.Since(start).Seconds())
start = time.Now()
for i := 0; i < inserts; i++ {
trie.Erase(data[i])
}
t.Logf("Deletion in %f sec.", time.Since(start).Seconds())
assert.Equal(t, 1, trie.NodeCount())
}

claimtrie/merkletrie/merkletrie.go

@@ -0,0 +1,255 @@
package merkletrie
import (
"bytes"
"fmt"
"runtime"
"sort"
"sync"
"github.com/pkg/errors"
"github.com/lbryio/lbcd/chaincfg/chainhash"
"github.com/lbryio/lbcd/claimtrie/node"
)
var (
// EmptyTrieHash represents the Merkle Hash of an empty PersistentTrie.
// "0000000000000000000000000000000000000000000000000000000000000001"
EmptyTrieHash = &chainhash.Hash{1}
NoChildrenHash = &chainhash.Hash{2}
NoClaimsHash = &chainhash.Hash{3}
)
// PersistentTrie implements a 256-way prefix tree.
type PersistentTrie struct {
repo Repo
root *vertex
bufs *sync.Pool
}
// NewPersistentTrie returns a PersistentTrie.
func NewPersistentTrie(repo Repo) *PersistentTrie {
tr := &PersistentTrie{
repo: repo,
bufs: &sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
},
root: newVertex(EmptyTrieHash),
}
return tr
}
// SetRoot drops all resolved nodes in the PersistentTrie and sets the root to the specified hash.
func (t *PersistentTrie) SetRoot(h *chainhash.Hash) error {
t.root = newVertex(h)
runtime.GC()
return nil
}
// Update updates the nodes along the path to the key.
// Each node is resolved or created with its merkle hash cleared.
func (t *PersistentTrie) Update(name []byte, hash *chainhash.Hash, restoreChildren bool) {
n := t.root
for i, ch := range name {
if restoreChildren && len(n.childLinks) == 0 {
t.resolveChildLinks(n, name[:i])
}
if n.childLinks[ch] == nil {
n.childLinks[ch] = newVertex(nil)
}
n.merkleHash = nil
n = n.childLinks[ch]
}
if restoreChildren && len(n.childLinks) == 0 {
t.resolveChildLinks(n, name)
}
n.merkleHash = nil
n.claimsHash = hash
}
// resolveChildLinks updates the links on n
func (t *PersistentTrie) resolveChildLinks(n *vertex, key []byte) {
if n.merkleHash == nil {
return
}
b := t.bufs.Get().(*bytes.Buffer)
defer t.bufs.Put(b)
b.Reset()
b.Write(key)
b.Write(n.merkleHash[:])
result, closer, err := t.repo.Get(b.Bytes())
if result == nil {
return
} else if err != nil {
panic(err)
}
defer closer.Close()
nb := nbuf(result)
_, n.claimsHash = nb.hasValue()
for i := 0; i < nb.entries(); i++ {
p, h := nb.entry(i)
n.childLinks[p] = newVertex(h)
}
}
// MerkleHash returns the Merkle Hash of the PersistentTrie.
// All nodes must have been resolved before calling this function.
func (t *PersistentTrie) MerkleHash() *chainhash.Hash {
buf := make([]byte, 0, 256)
if h := t.merkle(buf, t.root); h == nil {
return EmptyTrieHash
}
return t.root.merkleHash
}
// merkle recursively resolves the hashes of the node.
// All nodes must have been resolved before calling this function.
func (t *PersistentTrie) merkle(prefix []byte, v *vertex) *chainhash.Hash {
if v.merkleHash != nil {
return v.merkleHash
}
b := t.bufs.Get().(*bytes.Buffer)
defer t.bufs.Put(b)
b.Reset()
keys := keysInOrder(v)
for _, ch := range keys {
child := v.childLinks[ch]
if child == nil {
continue
}
p := append(prefix, ch)
h := t.merkle(p, child)
if h != nil {
b.WriteByte(ch) // nolint:errcheck
b.Write(h[:]) // nolint:errcheck
}
if h == nil || len(prefix) > 4 { // TODO: determine the right number here
delete(v.childLinks, ch) // keep the RAM down (they get recreated on Update)
}
}
if v.claimsHash != nil {
b.Write(v.claimsHash[:])
}
if b.Len() > 0 {
h := chainhash.DoubleHashH(b.Bytes())
v.merkleHash = &h
t.repo.Set(append(prefix, h[:]...), b.Bytes())
}
return v.merkleHash
}
func keysInOrder(v *vertex) []byte {
keys := make([]byte, 0, len(v.childLinks))
for key := range v.childLinks {
keys = append(keys, key)
}
sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
return keys
}
func (t *PersistentTrie) MerkleHashAllClaims() *chainhash.Hash {
buf := make([]byte, 0, 256)
if h := t.merkleAllClaims(buf, t.root); h == nil {
return EmptyTrieHash
}
return t.root.merkleHash
}
func (t *PersistentTrie) merkleAllClaims(prefix []byte, v *vertex) *chainhash.Hash {
if v.merkleHash != nil {
return v.merkleHash
}
b := t.bufs.Get().(*bytes.Buffer)
defer t.bufs.Put(b)
b.Reset()
keys := keysInOrder(v)
childHashes := make([]*chainhash.Hash, 0, len(keys))
for _, ch := range keys {
n := v.childLinks[ch]
if n == nil {
continue
}
p := append(prefix, ch)
h := t.merkleAllClaims(p, n)
if h != nil {
childHashes = append(childHashes, h)
b.WriteByte(ch) // nolint:errcheck
b.Write(h[:]) // nolint:errcheck
}
if h == nil || len(prefix) > 4 { // TODO: determine the right number here
delete(v.childLinks, ch) // keep the RAM down (they get recreated on Update)
}
}
if len(childHashes) > 1 || v.claimsHash != nil { // yeah, about that 1 there -- old code used the condensed trie
left := NoChildrenHash
if len(childHashes) > 0 {
left = node.ComputeMerkleRoot(childHashes)
}
right := NoClaimsHash
if v.claimsHash != nil {
b.Write(v.claimsHash[:]) // marks "has value"; nolint:errcheck
right = v.claimsHash
}
h := node.HashMerkleBranches(left, right)
v.merkleHash = h
t.repo.Set(append(prefix, h[:]...), b.Bytes())
} else if len(childHashes) == 1 {
v.merkleHash = childHashes[0] // pass it up the tree
t.repo.Set(append(prefix, v.merkleHash[:]...), b.Bytes())
}
return v.merkleHash
}
func (t *PersistentTrie) Close() error {
return errors.WithStack(t.repo.Close())
}
func (t *PersistentTrie) Dump(s string) {
// TODO: this function is in the wrong spot; either it goes with its caller or it needs to be a generic iterator
// we don't want fmt used in here either way
v := t.root
for i := 0; i < len(s); i++ {
t.resolveChildLinks(v, []byte(s[:i]))
ch := s[i]
v = v.childLinks[ch]
if v == nil {
fmt.Printf("Missing child at %s\n", s[:i+1])
return
}
}
t.resolveChildLinks(v, []byte(s))
fmt.Printf("Node hash: %s, has value: %t\n", v.merkleHash.String(), v.claimsHash != nil)
for key, value := range v.childLinks {
fmt.Printf(" Child %s hash: %s\n", string(key), value.merkleHash.String())
}
}
func (t *PersistentTrie) Flush() error {
return t.repo.Flush()
}

View file

@ -0,0 +1,25 @@
package merkletrie
import (
"testing"
"github.com/lbryio/lbcd/chaincfg/chainhash"
"github.com/lbryio/lbcd/claimtrie/node"
"github.com/stretchr/testify/require"
)
func TestName(t *testing.T) {
r := require.New(t)
target, _ := chainhash.NewHashFromStr("e9ffb584c62449f157c8be88257bd1eebb2d8ef824f5c86b43c4f8fd9e800d6a")
data := []*chainhash.Hash{EmptyTrieHash}
root := node.ComputeMerkleRoot(data)
r.True(EmptyTrieHash.IsEqual(root))
data = append(data, NoChildrenHash, NoClaimsHash)
root = node.ComputeMerkleRoot(data)
r.True(target.IsEqual(root))
}

View file

@ -0,0 +1,67 @@
package merkletrierepo
import (
"io"
"github.com/cockroachdb/pebble"
"github.com/pkg/errors"
)
type Pebble struct {
db *pebble.DB
}
func NewPebble(path string) (*Pebble, error) {
cache := pebble.NewCache(512 << 20)
//defer cache.Unref()
//
//go func() {
// tick := time.NewTicker(60 * time.Second)
// for range tick.C {
//
// m := cache.Metrics()
// fmt.Printf("cnt: %s, objs: %s, hits: %s, miss: %s, hitrate: %.2f\n",
// humanize.Bytes(uint64(m.Size)),
// humanize.Comma(m.Count),
// humanize.Comma(m.Hits),
// humanize.Comma(m.Misses),
// float64(m.Hits)/float64(m.Hits+m.Misses))
//
// }
//}()
db, err := pebble.Open(path, &pebble.Options{Cache: cache, BytesPerSync: 32 << 20, MaxOpenFiles: 2000})
repo := &Pebble{db: db}
return repo, errors.Wrapf(err, "unable to open %s", path)
}
func (repo *Pebble) Get(key []byte) ([]byte, io.Closer, error) {
d, c, e := repo.db.Get(key)
if e == pebble.ErrNotFound {
return nil, c, nil
}
return d, c, e
}
func (repo *Pebble) Set(key, value []byte) error {
return repo.db.Set(key, value, pebble.NoSync)
}
func (repo *Pebble) Close() error {
err := repo.db.Flush()
if err != nil {
// if we fail to close are we going to try again later?
return errors.Wrap(err, "on flush")
}
err = repo.db.Close()
return errors.Wrap(err, "on close")
}
func (repo *Pebble) Flush() error {
_, err := repo.db.AsyncFlush()
return err
}

View file

@ -0,0 +1,139 @@
package merkletrie
import (
"bytes"
"errors"
"runtime"
"sync"
"github.com/lbryio/lbcd/chaincfg/chainhash"
"github.com/lbryio/lbcd/claimtrie/node"
)
type MerkleTrie interface {
SetRoot(h *chainhash.Hash) error
Update(name []byte, h *chainhash.Hash, restoreChildren bool)
MerkleHash() *chainhash.Hash
MerkleHashAllClaims() *chainhash.Hash
Flush() error
}
type RamTrie struct {
collapsedTrie
bufs *sync.Pool
}
func NewRamTrie() *RamTrie {
return &RamTrie{
bufs: &sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
},
collapsedTrie: collapsedTrie{Root: &collapsedVertex{merkleHash: EmptyTrieHash}},
}
}
var ErrFullRebuildRequired = errors.New("a full rebuild is required")
func (rt *RamTrie) SetRoot(h *chainhash.Hash) error {
if rt.Root.merkleHash.IsEqual(h) {
runtime.GC()
return nil
}
// should technically clear the old trie first, but this is abused for partial rebuilds so don't
return ErrFullRebuildRequired
}
func (rt *RamTrie) Update(name []byte, h *chainhash.Hash, _ bool) {
if h == nil {
rt.Erase(name)
} else {
_, n := rt.InsertOrFind(name)
n.claimHash = h
}
}
func (rt *RamTrie) MerkleHash() *chainhash.Hash {
if h := rt.merkleHash(rt.Root); h == nil {
return EmptyTrieHash
}
return rt.Root.merkleHash
}
func (rt *RamTrie) merkleHash(v *collapsedVertex) *chainhash.Hash {
if v.merkleHash != nil {
return v.merkleHash
}
b := rt.bufs.Get().(*bytes.Buffer)
defer rt.bufs.Put(b)
b.Reset()
for _, ch := range v.children {
h := rt.merkleHash(ch) // h is a pointer; don't destroy its data
b.WriteByte(ch.key[0]) // nolint:errcheck
b.Write(rt.completeHash(h, ch.key)) // nolint:errcheck
}
if v.claimHash != nil {
b.Write(v.claimHash[:])
}
if b.Len() > 0 {
h := chainhash.DoubleHashH(b.Bytes())
v.merkleHash = &h
}
return v.merkleHash
}
func (rt *RamTrie) completeHash(h *chainhash.Hash, childKey KeyType) []byte {
var data [chainhash.HashSize + 1]byte
copy(data[1:], h[:])
for i := len(childKey) - 1; i > 0; i-- {
data[0] = childKey[i]
copy(data[1:], chainhash.DoubleHashB(data[:]))
}
return data[1:]
}
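// For example, for a collapsed edge with key "abc" and child hash h, this
// returns H('b' || H('c' || h)); the caller then writes 'a' ahead of that
// result, reproducing the byte-at-a-time hashing of the uncollapsed trie.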
func (rt *RamTrie) MerkleHashAllClaims() *chainhash.Hash {
if h := rt.merkleHashAllClaims(rt.Root); h == nil {
return EmptyTrieHash
}
return rt.Root.merkleHash
}
func (rt *RamTrie) merkleHashAllClaims(v *collapsedVertex) *chainhash.Hash {
if v.merkleHash != nil {
return v.merkleHash
}
childHashes := make([]*chainhash.Hash, 0, len(v.children))
for _, ch := range v.children {
h := rt.merkleHashAllClaims(ch)
childHashes = append(childHashes, h)
}
claimHash := NoClaimsHash
if v.claimHash != nil {
claimHash = v.claimHash
} else if len(childHashes) == 0 {
return nil
}
childHash := NoChildrenHash
if len(childHashes) > 0 {
// this shouldn't be referencing node; where else can we put this merkle root func?
childHash = node.ComputeMerkleRoot(childHashes)
}
v.merkleHash = node.HashMerkleBranches(childHash, claimHash)
return v.merkleHash
}
func (rt *RamTrie) Flush() error {
return nil
}
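// exampleRamTrieUsage is an illustrative sketch (not part of the original
// commit) of the intended RamTrie call sequence: set or replace the claim
// hash for a name, then recompute the root over all claims.
func exampleRamTrieUsage(name []byte, claim *chainhash.Hash) *chainhash.Hash {
rt := NewRamTrie()
rt.Update(name, claim, false) // a nil hash would erase the name instead
return rt.MerkleHashAllClaims()
}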

View file

@ -0,0 +1,13 @@
package merkletrie
import (
"io"
)
// Repo defines APIs for PersistentTrie to access persistence layer.
type Repo interface {
Get(key []byte) ([]byte, io.Closer, error)
Set(key, value []byte) error
Close() error
Flush() error
}

View file

@ -0,0 +1,43 @@
package merkletrie
import (
"github.com/lbryio/lbcd/chaincfg/chainhash"
)
type vertex struct {
merkleHash *chainhash.Hash
claimsHash *chainhash.Hash
childLinks map[byte]*vertex
}
func newVertex(hash *chainhash.Hash) *vertex {
return &vertex{childLinks: map[byte]*vertex{}, merkleHash: hash}
}
// TODO: more professional to use msgpack here?
// nbuf decodes the on-disk format of a node, which has the following form:
// ch(1B) hash(32B)
// ...
// ch(1B) hash(32B)
// vhash(32B)
type nbuf []byte
func (nb nbuf) entries() int {
return len(nb) / 33
}
func (nb nbuf) entry(i int) (byte, *chainhash.Hash) {
h := chainhash.Hash{}
copy(h[:], nb[33*i+1:])
return nb[33*i], &h
}
func (nb nbuf) hasValue() (bool, *chainhash.Hash) {
if len(nb)%33 == 0 {
return false, nil
}
h := chainhash.Hash{}
copy(h[:], nb[len(nb)-32:])
return true, &h
}
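// makeNbuf is an illustrative inverse of the decoder above (a sketch, not part
// of the original commit): child entries in key order, each "ch(1B) hash(32B)",
// followed by the optional value hash.
func makeNbuf(keys []byte, hashes []*chainhash.Hash, value *chainhash.Hash) nbuf {
b := make([]byte, 0, 33*len(keys)+chainhash.HashSize)
for i, ch := range keys {
b = append(b, ch)
b = append(b, hashes[i][:]...)
}
if value != nil {
b = append(b, value[:]...)
}
return nbuf(b)
}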

82
claimtrie/node/claim.go Normal file
View file

@ -0,0 +1,82 @@
package node
import (
"bytes"
"strconv"
"strings"
"github.com/lbryio/lbcd/chaincfg/chainhash"
"github.com/lbryio/lbcd/claimtrie/change"
"github.com/lbryio/lbcd/wire"
)
type Status int
const (
Accepted Status = iota
Activated
Deactivated
)
// Claim defines the structure of a stake, which can be either a claim or a support.
type Claim struct {
OutPoint wire.OutPoint
ClaimID change.ClaimID
Amount int64
// CreatedAt int32 // the very first block, unused at present
AcceptedAt int32 // the latest update height
ActiveAt int32 // AcceptedAt + actual delay
VisibleAt int32
Status Status `msgpack:",omitempty"`
Sequence int32 `msgpack:",omitempty"`
}
func (c *Claim) setOutPoint(op wire.OutPoint) *Claim {
c.OutPoint = op
return c
}
func (c *Claim) SetAmt(amt int64) *Claim {
c.Amount = amt
return c
}
func (c *Claim) setAccepted(height int32) *Claim {
c.AcceptedAt = height
return c
}
func (c *Claim) setActiveAt(height int32) *Claim {
c.ActiveAt = height
return c
}
func (c *Claim) setStatus(status Status) *Claim {
c.Status = status
return c
}
func OutPointLess(a, b wire.OutPoint) bool {
switch cmp := bytes.Compare(a.Hash[:], b.Hash[:]); {
case cmp < 0:
return true
case cmp > 0:
return false
default:
return a.Index < b.Index
}
}
func NewOutPointFromString(str string) *wire.OutPoint {
f := strings.Split(str, ":")
if len(f) != 2 {
return nil
}
hash, _ := chainhash.NewHashFromStr(f[0])
idx, _ := strconv.Atoi(f[1])
return wire.NewOutPoint(hash, uint32(idx))
}

View file

@ -0,0 +1,33 @@
package node
import (
"github.com/lbryio/lbcd/claimtrie/change"
"github.com/lbryio/lbcd/wire"
)
type ClaimList []*Claim
type comparator func(c *Claim) bool
func byID(id change.ClaimID) comparator {
return func(c *Claim) bool {
return c.ClaimID == id
}
}
func byOut(out wire.OutPoint) comparator {
return func(c *Claim) bool {
return c.OutPoint == out // assuming value comparison
}
}
func (l ClaimList) find(cmp comparator) *Claim {
for i := range l {
if cmp(l[i]) {
return l[i]
}
}
return nil
}

View file

@ -0,0 +1,39 @@
package node
import (
"github.com/lbryio/lbcd/chaincfg/chainhash"
"github.com/lbryio/lbcd/claimtrie/param"
)
type HashV2Manager struct {
Manager
}
func (nm *HashV2Manager) computeClaimHashes(name []byte) (*chainhash.Hash, int32) {
n, err := nm.NodeAt(nm.Height(), name)
if err != nil || n == nil {
return nil, 0
}
n.SortClaimsByBid()
claimHashes := make([]*chainhash.Hash, 0, len(n.Claims))
for _, c := range n.Claims {
if c.Status == Activated { // TODO: unit test this line
claimHashes = append(claimHashes, calculateNodeHash(c.OutPoint, n.TakenOverAt))
}
}
if len(claimHashes) > 0 {
return ComputeMerkleRoot(claimHashes), n.NextUpdate()
}
return nil, n.NextUpdate()
}
func (nm *HashV2Manager) Hash(name []byte) (*chainhash.Hash, int32) {
if nm.Height() >= param.ActiveParams.AllClaimsInMerkleForkHeight {
return nm.computeClaimHashes(name)
}
return nm.Manager.Hash(name)
}

View file

@ -0,0 +1,57 @@
package node
import (
"crypto/sha256"
"encoding/binary"
"strconv"
"github.com/lbryio/lbcd/chaincfg/chainhash"
"github.com/lbryio/lbcd/wire"
)
func HashMerkleBranches(left *chainhash.Hash, right *chainhash.Hash) *chainhash.Hash {
// Concatenate the left and right nodes.
var hash [chainhash.HashSize * 2]byte
copy(hash[:chainhash.HashSize], left[:])
copy(hash[chainhash.HashSize:], right[:])
newHash := chainhash.DoubleHashH(hash[:])
return &newHash
}
func ComputeMerkleRoot(hashes []*chainhash.Hash) *chainhash.Hash {
if len(hashes) <= 0 {
return nil
}
for len(hashes) > 1 {
if (len(hashes) & 1) > 0 { // odd count
hashes = append(hashes, hashes[len(hashes)-1])
}
for i := 0; i < len(hashes); i += 2 { // TODO: parallelize this loop (or use a lib that does it)
hashes[i>>1] = HashMerkleBranches(hashes[i], hashes[i+1])
}
hashes = hashes[:len(hashes)>>1]
}
return hashes[0]
}
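// For example, three hashes [A, B, C] reduce to [H(A||B), H(C||C)] -- the odd
// element is paired with itself -- and then to the root H(H(A||B) || H(C||C)).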
func calculateNodeHash(op wire.OutPoint, takeover int32) *chainhash.Hash {
txHash := chainhash.DoubleHashH(op.Hash[:])
nOut := []byte(strconv.Itoa(int(op.Index)))
nOutHash := chainhash.DoubleHashH(nOut)
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, uint64(takeover))
heightHash := chainhash.DoubleHashH(buf)
h := make([]byte, 0, sha256.Size*3)
h = append(h, txHash[:]...)
h = append(h, nOutHash[:]...)
h = append(h, heightHash[:]...)
hh := chainhash.DoubleHashH(h)
return &hh
}

47
claimtrie/node/log.go Normal file
View file

@ -0,0 +1,47 @@
package node
import (
"sync"
"github.com/btcsuite/btclog"
)
// log is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
var log btclog.Logger
// The default amount of logging is none.
func init() {
DisableLog()
}
// DisableLog disables all library log output. Logging output is disabled
// by default until either UseLogger or SetLogWriter are called.
func DisableLog() {
log = btclog.Disabled
}
// UseLogger uses a specified Logger to output package logging info.
// This should be used in preference to SetLogWriter if the caller is also
// using btclog.
func UseLogger(logger btclog.Logger) {
log = logger
}
var loggedStrings = map[string]bool{} // is this gonna get too large?
var loggedStringsMutex sync.Mutex
func LogOnce(s string) {
loggedStringsMutex.Lock()
defer loggedStringsMutex.Unlock()
if loggedStrings[s] {
return
}
loggedStrings[s] = true
log.Info(s)
}
func Warn(s string) {
log.Warn(s)
}

401
claimtrie/node/manager.go Normal file
View file

@ -0,0 +1,401 @@
package node
import (
"fmt"
"sort"
"github.com/pkg/errors"
"github.com/lbryio/lbcd/chaincfg/chainhash"
"github.com/lbryio/lbcd/claimtrie/change"
"github.com/lbryio/lbcd/claimtrie/param"
)
type Manager interface {
AppendChange(chg change.Change)
IncrementHeightTo(height int32, temporary bool) ([][]byte, error)
DecrementHeightTo(affectedNames [][]byte, height int32) ([][]byte, error)
Height() int32
Close() error
NodeAt(height int32, name []byte) (*Node, error)
IterateNames(predicate func(name []byte) bool)
Hash(name []byte) (*chainhash.Hash, int32)
Flush() error
}
type BaseManager struct {
repo Repo
height int32
changes []change.Change
tempChanges map[string][]change.Change
}
func NewBaseManager(repo Repo) (*BaseManager, error) {
nm := &BaseManager{
repo: repo,
}
return nm, nil
}
func (nm *BaseManager) NodeAt(height int32, name []byte) (*Node, error) {
changes, err := nm.repo.LoadChanges(name)
if err != nil {
return nil, errors.Wrap(err, "in load changes")
}
if nm.tempChanges != nil { // making an assumption that we only ever have tempChanges for a single block
changes = append(changes, nm.tempChanges[string(name)]...)
}
n, err := nm.newNodeFromChanges(changes, height)
if err != nil {
return nil, errors.Wrap(err, "in new node")
}
return n, nil
}
// Node returns a node at the current height.
// The returned node may have pending changes.
func (nm *BaseManager) node(name []byte) (*Node, error) {
return nm.NodeAt(nm.height, name)
}
// newNodeFromChanges returns a new Node constructed from the changes.
// The changes must preserve their order received.
func (nm *BaseManager) newNodeFromChanges(changes []change.Change, height int32) (*Node, error) {
if len(changes) == 0 {
return nil, nil
}
n := New()
previous := changes[0].Height
count := len(changes)
for i, chg := range changes {
if chg.Height < previous {
panic("expected the changes to be in order by height")
}
if chg.Height > height {
count = i
break
}
if previous < chg.Height {
n.AdjustTo(previous, chg.Height-1, chg.Name) // update bids and activation
previous = chg.Height
}
delay := nm.getDelayForName(n, chg)
err := n.ApplyChange(chg, delay)
if err != nil {
return nil, errors.Wrap(err, "in apply change")
}
}
if count <= 0 {
return nil, nil
}
lastChange := changes[count-1]
return n.AdjustTo(lastChange.Height, height, lastChange.Name), nil
}
func (nm *BaseManager) AppendChange(chg change.Change) {
nm.changes = append(nm.changes, chg)
// worth putting in this kind of thing pre-emptively?
// log.Debugf("CHG: %d, %s, %v, %s, %d", chg.Height, chg.Name, chg.Type, chg.ClaimID, chg.Amount)
}
func collectChildNames(changes []change.Change) {
// we need to determine which children (names that start with the same name) go with which change
// if we have the names in order then we can avoid iterating through all names in the change list
// and we can possibly reuse the previous list.
// what would happen in the old code:
// spending a claim (which happens before every update) could remove a node from the cached trie
// in which case we would fall back on the data from the previous block (where it obviously wasn't spent).
// It would only delete the node if it had no children, but there were even some rare situations
// where all of the children happened to be deleted first. That's what we must detect here.
// Algorithm:
// for each non-spend change,
// loop through all the spends before it and add them to its child list if they are its children.
type pair struct {
name string
order int
}
spends := make([]pair, 0, len(changes))
for i := range changes {
t := changes[i].Type
if t != change.SpendClaim {
continue
}
spends = append(spends, pair{string(changes[i].Name), i})
}
sort.Slice(spends, func(i, j int) bool {
return spends[i].name < spends[j].name
})
for i := range changes {
t := changes[i].Type
if t == change.SpendClaim || t == change.SpendSupport {
continue
}
a := string(changes[i].Name)
sc := map[string]bool{}
idx := sort.Search(len(spends), func(k int) bool {
return spends[k].name > a
})
for idx < len(spends) {
b := spends[idx].name
if len(b) <= len(a) || a != b[:len(a)] {
break // since they're ordered alphabetically, we should be able to break out once we're past matches
}
if spends[idx].order < i {
sc[b] = true
}
idx++
}
changes[i].SpentChildren = sc
}
}
// to understand the above function, it may be helpful to refer to the slower implementation:
//func collectChildNamesSlow(changes []change.Change) {
// for i := range changes {
// t := changes[i].Type
// if t == change.SpendClaim || t == change.SpendSupport {
// continue
// }
// a := changes[i].Name
// sc := map[string]bool{}
// for j := 0; j < i; j++ {
// t = changes[j].Type
// if t != change.SpendClaim {
// continue
// }
// b := changes[j].Name
// if len(b) >= len(a) && bytes.Equal(a, b[:len(a)]) {
// sc[string(b)] = true
// }
// }
// changes[i].SpentChildren = sc
// }
//}
func (nm *BaseManager) IncrementHeightTo(height int32, temporary bool) ([][]byte, error) {
if height <= nm.height {
panic("invalid height")
}
if height >= param.ActiveParams.MaxRemovalWorkaroundHeight {
// not technically needed until block 884430, but to be true to the arbitrary rollback length...
collectChildNames(nm.changes)
}
if temporary {
if nm.tempChanges != nil {
return nil, errors.Errorf("expected nil temporary changes")
}
nm.tempChanges = map[string][]change.Change{}
}
names := make([][]byte, 0, len(nm.changes))
for i := range nm.changes {
names = append(names, nm.changes[i].Name)
if temporary {
name := string(nm.changes[i].Name)
nm.tempChanges[name] = append(nm.tempChanges[name], nm.changes[i])
}
}
if !temporary {
if err := nm.repo.AppendChanges(nm.changes); err != nil { // destroys names
return nil, errors.Wrap(err, "in append changes")
}
}
// Truncate the change buffer, but release the backing array if it has grown large.
if len(nm.changes) > 1000 { // TODO: determine a good number here
nm.changes = nil // release the RAM
} else {
nm.changes = nm.changes[:0]
}
nm.height = height
return names, nil
}
func (nm *BaseManager) DecrementHeightTo(affectedNames [][]byte, height int32) ([][]byte, error) {
if height >= nm.height {
return affectedNames, errors.Errorf("invalid height of %d for %d", height, nm.height)
}
if nm.tempChanges != nil {
if height != nm.height-1 {
return affectedNames, errors.Errorf("invalid temporary rollback at %d to %d", height, nm.height)
}
for key := range nm.tempChanges {
affectedNames = append(affectedNames, []byte(key))
}
nm.tempChanges = nil
} else {
for _, name := range affectedNames {
if err := nm.repo.DropChanges(name, height); err != nil {
return affectedNames, errors.Wrap(err, "in drop changes")
}
}
}
nm.height = height
return affectedNames, nil
}
func (nm *BaseManager) getDelayForName(n *Node, chg change.Change) int32 {
// Note: we don't consider the active status of BestClaim here on purpose.
// That's because we deactivate and reactivate as part of claim updates.
// However, the final status will be accounted for when we compute the takeover heights;
// claims may get activated early at that point.
hasBest := n.BestClaim != nil
if hasBest && n.BestClaim.ClaimID == chg.ClaimID {
return 0
}
if chg.ActiveHeight >= chg.Height { // ActiveHeight is usually unset (aka, zero)
return chg.ActiveHeight - chg.Height
}
if !hasBest {
return 0
}
delay := calculateDelay(chg.Height, n.TakenOverAt)
if delay > 0 && nm.aWorkaroundIsNeeded(n, chg) {
if chg.Height >= nm.height {
LogOnce(fmt.Sprintf("Delay workaround applies to %s at %d, ClaimID: %s",
chg.Name, chg.Height, chg.ClaimID))
}
return 0
}
return delay
}
func hasZeroActiveClaims(n *Node) bool {
// this isn't quite the same as having an active best (since that is only updated after all changes are processed)
for _, c := range n.Claims {
if c.Status == Activated {
return false
}
}
return true
}
// aWorkaroundIsNeeded handles bugs that existed in previous versions
func (nm *BaseManager) aWorkaroundIsNeeded(n *Node, chg change.Change) bool {
if chg.Type == change.SpendClaim || chg.Type == change.SpendSupport {
return false
}
if chg.Height >= param.ActiveParams.MaxRemovalWorkaroundHeight {
// TODO: hard fork this out; it's a bug from previous versions:
// old 17.3 C++ code we're trying to mimic (where empty means no active claims):
// auto it = nodesToAddOrUpdate.find(name); // nodesToAddOrUpdate is the working changes, base is previous block
// auto answer = (it || (it = base->find(name))) && !it->empty() ? nNextHeight - it->nHeightOfLastTakeover : 0;
return hasZeroActiveClaims(n) && nm.hasChildren(chg.Name, chg.Height, chg.SpentChildren, 2)
} else if len(n.Claims) > 0 {
// NOTE: old code had a bug in it where nodes with no claims but with children would get left in the cache after removal.
// This would cause the getNumBlocksOfContinuousOwnership to return zero (causing incorrect takeover height calc).
w, ok := param.DelayWorkarounds[string(chg.Name)]
if ok {
for _, h := range w {
if chg.Height == h {
return true
}
}
}
}
return false
}
func calculateDelay(curr, tookOver int32) int32 {
delay := (curr - tookOver) / param.ActiveParams.ActiveDelayFactor
if delay > param.ActiveParams.MaxActiveDelay {
return param.ActiveParams.MaxActiveDelay
}
return delay
}
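// For example, assuming mainnet-style parameters of ActiveDelayFactor = 32 and
// MaxActiveDelay = 4032, a claim arriving 6400 blocks after the last takeover
// activates after min(6400/32, 4032) = 200 blocks.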
func (nm *BaseManager) Height() int32 {
return nm.height
}
func (nm *BaseManager) Close() error {
return errors.WithStack(nm.repo.Close())
}
func (nm *BaseManager) hasChildren(name []byte, height int32, spentChildren map[string]bool, required int) bool {
c := map[byte]bool{}
if spentChildren == nil {
spentChildren = map[string]bool{}
}
err := nm.repo.IterateChildren(name, func(changes []change.Change) bool {
// if the key is unseen, generate a node for it to height
// if that node is active then increase the count
if len(changes) == 0 {
return true
}
if c[changes[0].Name[len(name)]] { // assuming all names here are longer than the starter name
return true // we already checked a similar name
}
if spentChildren[string(changes[0].Name)] {
return true // children that are spent in the same block cannot count as active children
}
n, _ := nm.newNodeFromChanges(changes, height)
if n != nil && n.HasActiveBestClaim() {
c[changes[0].Name[len(name)]] = true
if len(c) >= required {
return false
}
}
return true
})
return err == nil && len(c) >= required
}
func (nm *BaseManager) IterateNames(predicate func(name []byte) bool) {
nm.repo.IterateAll(predicate)
}
func (nm *BaseManager) Hash(name []byte) (*chainhash.Hash, int32) {
n, err := nm.node(name)
if err != nil || n == nil {
return nil, 0
}
if len(n.Claims) > 0 {
if n.BestClaim != nil && n.BestClaim.Status == Activated {
h := calculateNodeHash(n.BestClaim.OutPoint, n.TakenOverAt)
return h, n.NextUpdate()
}
}
return nil, n.NextUpdate()
}
func (nm *BaseManager) Flush() error {
return nm.repo.Flush()
}

View file

@ -0,0 +1,299 @@
package node
import (
"fmt"
"testing"
"github.com/lbryio/lbcd/claimtrie/change"
"github.com/lbryio/lbcd/claimtrie/node/noderepo"
"github.com/lbryio/lbcd/claimtrie/param"
"github.com/lbryio/lbcd/wire"
"github.com/stretchr/testify/require"
)
var (
out1 = NewOutPointFromString("0000000000000000000000000000000000000000000000000000000000000000:1")
out2 = NewOutPointFromString("0000000000000000000000000000000000000000000000000000000000000000:2")
out3 = NewOutPointFromString("0100000000000000000000000000000000000000000000000000000000000000:1")
out4 = NewOutPointFromString("0100000000000000000000000000000000000000000000000000000000000000:2")
name1 = []byte("name1")
name2 = []byte("name2")
)
// verify that we can round-trip bytes to strings
func TestStringRoundTrip(t *testing.T) {
r := require.New(t)
data := [][]byte{
{97, 98, 99, 0, 100, 255},
{0xc3, 0x28},
{0xa0, 0xa1},
{0xe2, 0x28, 0xa1},
{0xf0, 0x28, 0x8c, 0x28},
}
for _, d := range data {
s := string(d)
r.Equal(s, fmt.Sprintf("%s", d)) // nolint
d2 := []byte(s)
r.Equal(len(d), len(s))
r.Equal(d, d2)
}
}
func TestSimpleAddClaim(t *testing.T) {
r := require.New(t)
param.SetNetwork(wire.TestNet)
repo, err := noderepo.NewPebble(t.TempDir())
r.NoError(err)
m, err := NewBaseManager(repo)
r.NoError(err)
defer m.Close()
_, err = m.IncrementHeightTo(10, false)
r.NoError(err)
chg := change.NewChange(change.AddClaim).SetName(name1).SetOutPoint(out1).SetHeight(11)
m.AppendChange(chg)
_, err = m.IncrementHeightTo(11, false)
r.NoError(err)
chg = chg.SetName(name2).SetOutPoint(out2).SetHeight(12)
m.AppendChange(chg)
_, err = m.IncrementHeightTo(12, false)
r.NoError(err)
n1, err := m.node(name1)
r.NoError(err)
r.Equal(1, len(n1.Claims))
r.NotNil(n1.Claims.find(byOut(*out1)))
n2, err := m.node(name2)
r.NoError(err)
r.Equal(1, len(n2.Claims))
r.NotNil(n2.Claims.find(byOut(*out2)))
_, err = m.DecrementHeightTo([][]byte{name2}, 11)
r.NoError(err)
n2, err = m.node(name2)
r.NoError(err)
r.Nil(n2)
_, err = m.DecrementHeightTo([][]byte{name1}, 1)
r.NoError(err)
n2, err = m.node(name1)
r.NoError(err)
r.Nil(n2)
}
func TestSupportAmounts(t *testing.T) {
r := require.New(t)
param.SetNetwork(wire.TestNet)
repo, err := noderepo.NewPebble(t.TempDir())
r.NoError(err)
m, err := NewBaseManager(repo)
r.NoError(err)
defer m.Close()
_, err = m.IncrementHeightTo(10, false)
r.NoError(err)
chg := change.NewChange(change.AddClaim).SetName(name1).SetOutPoint(out1).SetHeight(11).SetAmount(3)
chg.ClaimID = change.NewClaimID(*out1)
m.AppendChange(chg)
chg = change.NewChange(change.AddClaim).SetName(name1).SetOutPoint(out2).SetHeight(11).SetAmount(4)
chg.ClaimID = change.NewClaimID(*out2)
m.AppendChange(chg)
_, err = m.IncrementHeightTo(11, false)
r.NoError(err)
chg = change.NewChange(change.AddSupport).SetName(name1).SetOutPoint(out3).SetHeight(12).SetAmount(2)
chg.ClaimID = change.NewClaimID(*out1)
m.AppendChange(chg)
chg = change.NewChange(change.AddSupport).SetName(name1).SetOutPoint(out4).SetHeight(12).SetAmount(2)
chg.ClaimID = change.NewClaimID(*out2)
m.AppendChange(chg)
chg = change.NewChange(change.SpendSupport).SetName(name1).SetOutPoint(out4).SetHeight(12).SetAmount(2)
chg.ClaimID = change.NewClaimID(*out2)
m.AppendChange(chg)
_, err = m.IncrementHeightTo(20, false)
r.NoError(err)
n1, err := m.node(name1)
r.NoError(err)
r.Equal(2, len(n1.Claims))
r.Equal(int64(5), n1.BestClaim.Amount+n1.SupportSums[n1.BestClaim.ClaimID.Key()])
}
func TestNodeSort(t *testing.T) {
r := require.New(t)
param.ActiveParams.ExtendedClaimExpirationTime = 1000
r.True(OutPointLess(*out1, *out2))
r.True(OutPointLess(*out1, *out3))
n := New()
n.Claims = append(n.Claims, &Claim{OutPoint: *out1, AcceptedAt: 3, Amount: 3, ClaimID: change.ClaimID{1}})
n.Claims = append(n.Claims, &Claim{OutPoint: *out2, AcceptedAt: 3, Amount: 3, ClaimID: change.ClaimID{2}})
n.handleExpiredAndActivated(3)
n.updateTakeoverHeight(3, []byte{}, true)
r.Equal(n.Claims.find(byOut(*out1)).OutPoint.String(), n.BestClaim.OutPoint.String())
n.Claims = append(n.Claims, &Claim{OutPoint: *out3, AcceptedAt: 3, Amount: 3, ClaimID: change.ClaimID{3}})
n.handleExpiredAndActivated(3)
n.updateTakeoverHeight(3, []byte{}, true)
r.Equal(n.Claims.find(byOut(*out1)).OutPoint.String(), n.BestClaim.OutPoint.String())
}
func TestClaimSort(t *testing.T) {
r := require.New(t)
param.ActiveParams.ExtendedClaimExpirationTime = 1000
n := New()
n.Claims = append(n.Claims, &Claim{OutPoint: *out2, AcceptedAt: 3, Amount: 3, ClaimID: change.ClaimID{2}, Status: Activated})
n.Claims = append(n.Claims, &Claim{OutPoint: *out3, AcceptedAt: 3, Amount: 2, ClaimID: change.ClaimID{3}, Status: Activated})
n.Claims = append(n.Claims, &Claim{OutPoint: *out3, AcceptedAt: 4, Amount: 2, ClaimID: change.ClaimID{4}, Status: Activated})
n.Claims = append(n.Claims, &Claim{OutPoint: *out1, AcceptedAt: 3, Amount: 4, ClaimID: change.ClaimID{1}, Status: Activated})
n.Claims = append(n.Claims, &Claim{OutPoint: *out1, AcceptedAt: 1, Amount: 9, ClaimID: change.ClaimID{5}, Status: Accepted})
n.SortClaimsByBid()
r.Equal(int64(4), n.Claims[0].Amount)
r.Equal(int64(3), n.Claims[1].Amount)
r.Equal(int64(2), n.Claims[2].Amount)
r.Equal(int32(4), n.Claims[3].AcceptedAt)
}
func TestHasChildren(t *testing.T) {
r := require.New(t)
param.SetNetwork(wire.TestNet)
repo, err := noderepo.NewPebble(t.TempDir())
r.NoError(err)
m, err := NewBaseManager(repo)
r.NoError(err)
defer m.Close()
chg := change.NewChange(change.AddClaim).SetName([]byte("a")).SetOutPoint(out1).SetHeight(1).SetAmount(2)
chg.ClaimID = change.NewClaimID(*out1)
m.AppendChange(chg)
_, err = m.IncrementHeightTo(1, false)
r.NoError(err)
r.False(m.hasChildren([]byte("a"), 1, nil, 1))
chg = change.NewChange(change.AddClaim).SetName([]byte("ab")).SetOutPoint(out2).SetHeight(2).SetAmount(2)
chg.ClaimID = change.NewClaimID(*out2)
m.AppendChange(chg)
_, err = m.IncrementHeightTo(2, false)
r.NoError(err)
r.False(m.hasChildren([]byte("a"), 2, nil, 2))
r.True(m.hasChildren([]byte("a"), 2, nil, 1))
chg = change.NewChange(change.AddClaim).SetName([]byte("abc")).SetOutPoint(out3).SetHeight(3).SetAmount(2)
chg.ClaimID = change.NewClaimID(*out3)
m.AppendChange(chg)
_, err = m.IncrementHeightTo(3, false)
r.NoError(err)
r.False(m.hasChildren([]byte("a"), 3, nil, 2))
chg = change.NewChange(change.AddClaim).SetName([]byte("ac")).SetOutPoint(out1).SetHeight(4).SetAmount(2)
chg.ClaimID = change.NewClaimID(*out4)
m.AppendChange(chg)
_, err = m.IncrementHeightTo(4, false)
r.NoError(err)
r.True(m.hasChildren([]byte("a"), 4, nil, 2))
}
func TestCollectChildren(t *testing.T) {
r := require.New(t)
c1 := change.Change{Name: []byte("ba"), Type: change.SpendClaim}
c2 := change.Change{Name: []byte("ba"), Type: change.UpdateClaim}
c3 := change.Change{Name: []byte("ac"), Type: change.SpendClaim}
c4 := change.Change{Name: []byte("ac"), Type: change.UpdateClaim}
c5 := change.Change{Name: []byte("a"), Type: change.SpendClaim}
c6 := change.Change{Name: []byte("a"), Type: change.UpdateClaim}
c7 := change.Change{Name: []byte("ab"), Type: change.SpendClaim}
c8 := change.Change{Name: []byte("ab"), Type: change.UpdateClaim}
c := []change.Change{c1, c2, c3, c4, c5, c6, c7, c8}
collectChildNames(c)
r.Empty(c[0].SpentChildren)
r.Empty(c[2].SpentChildren)
r.Empty(c[4].SpentChildren)
r.Empty(c[6].SpentChildren)
r.Len(c[1].SpentChildren, 0)
r.Len(c[3].SpentChildren, 0)
r.Len(c[5].SpentChildren, 1)
r.True(c[5].SpentChildren["ac"])
r.Len(c[7].SpentChildren, 0)
}
func TestTemporaryAddClaim(t *testing.T) {
r := require.New(t)
param.SetNetwork(wire.TestNet)
repo, err := noderepo.NewPebble(t.TempDir())
r.NoError(err)
m, err := NewBaseManager(repo)
r.NoError(err)
defer m.Close()
_, err = m.IncrementHeightTo(10, false)
r.NoError(err)
chg := change.NewChange(change.AddClaim).SetName(name1).SetOutPoint(out1).SetHeight(11)
m.AppendChange(chg)
_, err = m.IncrementHeightTo(11, false)
r.NoError(err)
chg = chg.SetName(name2).SetOutPoint(out2).SetHeight(12)
m.AppendChange(chg)
_, err = m.IncrementHeightTo(12, true)
r.NoError(err)
n1, err := m.node(name1)
r.NoError(err)
r.Equal(1, len(n1.Claims))
r.NotNil(n1.Claims.find(byOut(*out1)))
n2, err := m.node(name2)
r.NoError(err)
r.Equal(1, len(n2.Claims))
r.NotNil(n2.Claims.find(byOut(*out2)))
names, err := m.DecrementHeightTo([][]byte{name2}, 11)
r.Equal(names[0], name2)
r.NoError(err)
n2, err = m.node(name2)
r.NoError(err)
r.Nil(n2)
_, err = m.DecrementHeightTo([][]byte{name1}, 1)
r.NoError(err)
n2, err = m.node(name1)
r.NoError(err)
r.Nil(n2)
}

342
claimtrie/node/node.go Normal file
View file

@ -0,0 +1,342 @@
package node
import (
"fmt"
"math"
"sort"
"github.com/lbryio/lbcd/claimtrie/change"
"github.com/lbryio/lbcd/claimtrie/param"
)
type Node struct {
BestClaim *Claim // The claim that has most effective amount at the current height.
TakenOverAt int32 // The height at when the current BestClaim took over.
Claims ClaimList // List of all Claims.
Supports ClaimList // List of all Supports, including orphaned ones.
SupportSums map[string]int64
}
// New returns a new node.
func New() *Node {
return &Node{SupportSums: map[string]int64{}}
}
func (n *Node) HasActiveBestClaim() bool {
return n.BestClaim != nil && n.BestClaim.Status == Activated
}
func (n *Node) ApplyChange(chg change.Change, delay int32) error {
visibleAt := chg.VisibleHeight
if visibleAt <= 0 {
visibleAt = chg.Height
}
switch chg.Type {
case change.AddClaim:
c := &Claim{
OutPoint: chg.OutPoint,
Amount: chg.Amount,
ClaimID: chg.ClaimID,
// CreatedAt: chg.Height,
AcceptedAt: chg.Height,
ActiveAt: chg.Height + delay,
VisibleAt: visibleAt,
Sequence: int32(len(n.Claims)),
}
// old := n.Claims.find(byOut(chg.OutPoint)) // TODO: remove this after proving ResetHeight works
// if old != nil {
// return errors.Errorf("CONFLICT WITH EXISTING TXO! Name: %s, Height: %d", chg.Name, chg.Height)
// }
n.Claims = append(n.Claims, c)
case change.SpendClaim:
c := n.Claims.find(byOut(chg.OutPoint))
if c != nil {
c.setStatus(Deactivated)
} else {
LogOnce(fmt.Sprintf("Spending claim but missing existing claim with TXO %s, "+
"Name: %s, ID: %s", chg.OutPoint, chg.Name, chg.ClaimID))
}
// apparently it's legit to be absent in the map:
// 'two' at 481100, 36a719a156a1df178531f3c712b8b37f8e7cc3b36eea532df961229d936272a1:0
case change.UpdateClaim:
// Find and remove the claim, which has just been spent.
c := n.Claims.find(byID(chg.ClaimID))
if c != nil && c.Status == Deactivated {
// Keep its ID, which was generated from the spent claim.
// And update the rest of properties.
c.setOutPoint(chg.OutPoint).SetAmt(chg.Amount)
c.setStatus(Accepted) // it was Deactivated in the spend (but we only activate at the end of the block)
// that's because the old code would put all insertions into the "queue" that was processed at block's end
// This forces us to be newer, which may result in an unintentional takeover if there's an older one.
// TODO: reconsider these updates in future hard forks.
c.setAccepted(chg.Height)
c.setActiveAt(chg.Height + delay)
} else {
LogOnce(fmt.Sprintf("Updating claim but missing existing claim with ID %s", chg.ClaimID))
}
case change.AddSupport:
n.Supports = append(n.Supports, &Claim{
OutPoint: chg.OutPoint,
Amount: chg.Amount,
ClaimID: chg.ClaimID,
AcceptedAt: chg.Height,
ActiveAt: chg.Height + delay,
VisibleAt: visibleAt,
})
case change.SpendSupport:
s := n.Supports.find(byOut(chg.OutPoint))
if s != nil {
if s.Status == Activated {
n.SupportSums[s.ClaimID.Key()] -= s.Amount
}
// TODO: we could do without this Deactivated flag if we set expiration instead
// That would eliminate the above Sum update.
// We would also need to track the update situation, though, but that could be done locally.
s.setStatus(Deactivated)
} else {
LogOnce(fmt.Sprintf("Spending support but missing existing claim with TXO %s, "+
"Name: %s, ID: %s", chg.OutPoint, chg.Name, chg.ClaimID))
}
}
return nil
}
// AdjustTo activates claims and computes takeovers until it reaches the specified height.
func (n *Node) AdjustTo(height, maxHeight int32, name []byte) *Node {
changed := n.handleExpiredAndActivated(height) > 0
n.updateTakeoverHeight(height, name, changed)
if maxHeight > height {
for h := n.NextUpdate(); h <= maxHeight; h = n.NextUpdate() {
changed = n.handleExpiredAndActivated(h) > 0
n.updateTakeoverHeight(h, name, changed)
height = h
}
}
return n
}
func (n *Node) updateTakeoverHeight(height int32, name []byte, refindBest bool) {
candidate := n.BestClaim
if refindBest {
candidate = n.findBestClaim() // so expensive...
}
hasCandidate := candidate != nil
hasCurrentWinner := n.HasActiveBestClaim()
takeoverHappening := !hasCandidate || !hasCurrentWinner || candidate.ClaimID != n.BestClaim.ClaimID
if takeoverHappening {
if n.activateAllClaims(height) > 0 {
candidate = n.findBestClaim()
}
}
if !takeoverHappening && height < param.ActiveParams.MaxRemovalWorkaroundHeight {
// This is a super ugly hack to work around bug in old code.
// The bug: un/support a name then update it. This will cause its takeover height to be reset to current.
// This is because the old code would add to the cache without setting block originals when dealing in supports.
_, takeoverHappening = param.TakeoverWorkarounds[fmt.Sprintf("%d_%s", height, name)] // TODO: ditch the fmt call
}
if takeoverHappening {
n.TakenOverAt = height
n.BestClaim = candidate
}
}
func (n *Node) handleExpiredAndActivated(height int32) int {
ot := param.ActiveParams.OriginalClaimExpirationTime
et := param.ActiveParams.ExtendedClaimExpirationTime
fk := param.ActiveParams.ExtendedClaimExpirationForkHeight
expiresAt := func(c *Claim) int32 {
if c.AcceptedAt+ot > fk {
return c.AcceptedAt + et
}
return c.AcceptedAt + ot
}
changes := 0
update := func(items ClaimList, sums map[string]int64) ClaimList {
for i := 0; i < len(items); i++ {
c := items[i]
if c.Status == Accepted && c.ActiveAt <= height && c.VisibleAt <= height {
c.setStatus(Activated)
changes++
if sums != nil {
sums[c.ClaimID.Key()] += c.Amount
}
}
if c.Status == Deactivated || expiresAt(c) <= height {
if i < len(items)-1 {
items[i] = items[len(items)-1]
i--
}
items = items[:len(items)-1]
changes++
if sums != nil && c.Status != Deactivated {
sums[c.ClaimID.Key()] -= c.Amount
}
}
}
return items
}
n.Claims = update(n.Claims, nil)
n.Supports = update(n.Supports, n.SupportSums)
return changes
}
// NextUpdate returns the nearest future height at which the node should
// be refreshed due to changes in claims or supports.
func (n Node) NextUpdate() int32 {
ot := param.ActiveParams.OriginalClaimExpirationTime
et := param.ActiveParams.ExtendedClaimExpirationTime
fk := param.ActiveParams.ExtendedClaimExpirationForkHeight
expiresAt := func(c *Claim) int32 {
if c.AcceptedAt+ot > fk {
return c.AcceptedAt + et
}
return c.AcceptedAt + ot
}
next := int32(math.MaxInt32)
for _, c := range n.Claims {
ea := expiresAt(c)
if ea < next {
next = ea
}
// if we're not active, we need to go to activeAt unless we're still invisible there
if c.Status == Accepted {
min := c.ActiveAt
if c.VisibleAt > min {
min = c.VisibleAt
}
if min < next {
next = min
}
}
}
for _, s := range n.Supports {
es := expiresAt(s)
if es < next {
next = es
}
if s.Status == Accepted {
min := s.ActiveAt
if s.VisibleAt > min {
min = s.VisibleAt
}
if min < next {
next = min
}
}
}
return next
}
func (n Node) findBestClaim() *Claim {
// WARNING: this method is called billions of times.
// if we just had some easy way to know that our best claim was the first one in the list...
// or it may be faster to cache effective amount in the db at some point.
var best *Claim
var bestAmount int64
for _, candidate := range n.Claims {
// not using switch here for performance reasons
if candidate.Status != Activated {
continue
}
if best == nil {
best = candidate
continue
}
candidateAmount := candidate.Amount + n.SupportSums[candidate.ClaimID.Key()]
if bestAmount <= 0 {
bestAmount = best.Amount + n.SupportSums[best.ClaimID.Key()]
}
switch {
case candidateAmount > bestAmount:
best = candidate
bestAmount = candidateAmount
case candidateAmount < bestAmount:
continue
case candidate.AcceptedAt < best.AcceptedAt:
best = candidate
bestAmount = candidateAmount
case candidate.AcceptedAt > best.AcceptedAt:
continue
case OutPointLess(candidate.OutPoint, best.OutPoint):
best = candidate
bestAmount = candidateAmount
}
}
return best
}
func (n *Node) activateAllClaims(height int32) int {
count := 0
for _, c := range n.Claims {
if c.Status == Accepted && c.ActiveAt > height && c.VisibleAt <= height {
c.setActiveAt(height) // don't necessarily need to change this number?
c.setStatus(Activated)
count++
}
}
for _, s := range n.Supports {
if s.Status == Accepted && s.ActiveAt > height && s.VisibleAt <= height {
s.setActiveAt(height) // don't necessarily need to change this number?
s.setStatus(Activated)
count++
n.SupportSums[s.ClaimID.Key()] += s.Amount
}
}
return count
}
func (n *Node) SortClaimsByBid() {
// purposefully sorting in descending order by swapping the func parameters (j, i):
sort.Slice(n.Claims, func(j, i int) bool {
// SupportSums only includes active values; do the same for the amount. No active claim will have a zero amount
iAmount := n.SupportSums[n.Claims[i].ClaimID.Key()]
if n.Claims[i].Status == Activated {
iAmount += n.Claims[i].Amount
}
jAmount := n.SupportSums[n.Claims[j].ClaimID.Key()]
if n.Claims[j].Status == Activated {
jAmount += n.Claims[j].Amount
}
switch {
case iAmount < jAmount:
return true
case iAmount > jAmount:
return false
case n.Claims[i].AcceptedAt > n.Claims[j].AcceptedAt:
return true
case n.Claims[i].AcceptedAt < n.Claims[j].AcceptedAt:
return false
}
return OutPointLess(n.Claims[j].OutPoint, n.Claims[i].OutPoint)
})
}

View file

@ -0,0 +1,188 @@
package noderepo
import (
"testing"
"github.com/lbryio/lbcd/claimtrie/change"
"github.com/lbryio/lbcd/claimtrie/node"
"github.com/stretchr/testify/require"
)
var (
out1 = node.NewOutPointFromString("0000000000000000000000000000000000000000000000000000000000000000:1")
testNodeName1 = []byte("name1")
)
func TestPebble(t *testing.T) {
r := require.New(t)
repo, err := NewPebble(t.TempDir())
r.NoError(err)
defer func() {
err := repo.Close()
r.NoError(err)
}()
cleanup := func() {
lowerBound := testNodeName1
upperBound := append(testNodeName1, byte(0))
err := repo.db.DeleteRange(lowerBound, upperBound, nil)
r.NoError(err)
}
testNodeRepo(t, repo, func() {}, cleanup)
}
func testNodeRepo(t *testing.T, repo node.Repo, setup, cleanup func()) {
r := require.New(t)
chg := change.NewChange(change.AddClaim).SetName(testNodeName1).SetOutPoint(out1)
testcases := []struct {
name string
height int32
changes []change.Change
expected []change.Change
}{
{
"test 1",
1,
[]change.Change{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
[]change.Change{chg.SetHeight(1)},
},
{
"test 2",
2,
[]change.Change{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
[]change.Change{chg.SetHeight(1)},
},
{
"test 3",
3,
[]change.Change{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
[]change.Change{chg.SetHeight(1), chg.SetHeight(3)},
},
{
"test 4",
4,
[]change.Change{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
[]change.Change{chg.SetHeight(1), chg.SetHeight(3)},
},
{
"test 5",
5,
[]change.Change{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
[]change.Change{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
},
{
"test 6",
6,
[]change.Change{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
[]change.Change{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
},
}
for _, tt := range testcases {
setup()
err := repo.AppendChanges(tt.changes)
r.NoError(err)
changes, err := repo.LoadChanges(testNodeName1)
r.NoError(err)
r.Equalf(tt.expected, changes[:len(tt.expected)], tt.name)
cleanup()
}
testcases2 := []struct {
name string
height int32
changes [][]change.Change
expected []change.Change
}{
{
"Save in 2 batches, and load up to 1",
1,
[][]change.Change{
{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
{chg.SetHeight(6), chg.SetHeight(8), chg.SetHeight(9)},
},
[]change.Change{chg.SetHeight(1)},
},
{
"Save in 2 batches, and load up to 9",
9,
[][]change.Change{
{chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5)},
{chg.SetHeight(6), chg.SetHeight(8), chg.SetHeight(9)},
},
[]change.Change{
chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5),
chg.SetHeight(6), chg.SetHeight(8), chg.SetHeight(9),
},
},
{
"Save in 3 batches, and load up to 8",
8,
[][]change.Change{
{chg.SetHeight(1), chg.SetHeight(3)},
{chg.SetHeight(5)},
{chg.SetHeight(6), chg.SetHeight(8), chg.SetHeight(9)},
},
[]change.Change{
chg.SetHeight(1), chg.SetHeight(3), chg.SetHeight(5),
chg.SetHeight(6), chg.SetHeight(8),
},
},
}
for _, tt := range testcases2 {
setup()
for _, changes := range tt.changes {
err := repo.AppendChanges(changes)
r.NoError(err)
}
changes, err := repo.LoadChanges(testNodeName1)
r.NoError(err)
r.Equalf(tt.expected, changes[:len(tt.expected)], tt.name)
cleanup()
}
}
func TestIterator(t *testing.T) {
r := require.New(t)
repo, err := NewPebble(t.TempDir())
r.NoError(err)
defer func() {
err := repo.Close()
r.NoError(err)
}()
creation := []change.Change{
{Name: []byte("test\x00"), Height: 5},
{Name: []byte("test\x00\x00"), Height: 5},
{Name: []byte("test\x00b"), Height: 5},
{Name: []byte("test\x00\xFF"), Height: 5},
{Name: []byte("testa"), Height: 5},
}
err = repo.AppendChanges(creation)
r.NoError(err)
i := 0
err = repo.IterateChildren([]byte{}, func(changes []change.Change) bool {
r.Equal(creation[i], changes[0])
i++
return true
})
r.NoError(err)
}

View file

@ -0,0 +1,177 @@
package noderepo
import (
"bytes"
"sort"
"github.com/cockroachdb/pebble"
"github.com/lbryio/lbcd/claimtrie/change"
"github.com/pkg/errors"
)
type Pebble struct {
db *pebble.DB
}
func NewPebble(path string) (*Pebble, error) {
db, err := pebble.Open(path, &pebble.Options{Cache: pebble.NewCache(64 << 20), BytesPerSync: 8 << 20, MaxOpenFiles: 2000})
repo := &Pebble{db: db}
return repo, errors.Wrapf(err, "unable to open %s", path)
}
func (repo *Pebble) AppendChanges(changes []change.Change) error {
batch := repo.db.NewBatch()
defer batch.Close()
buffer := bytes.NewBuffer(nil)
for _, chg := range changes {
buffer.Reset()
err := chg.Marshal(buffer)
if err != nil {
return errors.Wrap(err, "in marshaller")
}
err = batch.Merge(chg.Name, buffer.Bytes(), pebble.NoSync)
if err != nil {
return errors.Wrap(err, "in merge")
}
}
return errors.Wrap(batch.Commit(pebble.NoSync), "in commit")
}
func (repo *Pebble) LoadChanges(name []byte) ([]change.Change, error) {
data, closer, err := repo.db.Get(name)
if err != nil && err != pebble.ErrNotFound {
return nil, errors.Wrapf(err, "in get %s", name) // does returning a name in an error expose too much?
}
if closer != nil {
defer closer.Close()
}
return unmarshalChanges(name, data)
}
func unmarshalChanges(name, data []byte) ([]change.Change, error) {
// data is 84+ bytes per change
changes := make([]change.Change, 0, len(data)/84+1) // average is 5.1 changes
buffer := bytes.NewBuffer(data)
sortNeeded := false
for buffer.Len() > 0 {
var chg change.Change
err := chg.Unmarshal(buffer)
if err != nil {
return nil, errors.Wrap(err, "in decode")
}
chg.Name = name
if len(changes) > 0 && chg.Height < changes[len(changes)-1].Height {
sortNeeded = true // alternatively: sortNeeded || chg.Height != chg.VisibleHeight
}
changes = append(changes, chg)
}
if sortNeeded {
// this was required for the normalization stuff:
sort.SliceStable(changes, func(i, j int) bool {
return changes[i].Height < changes[j].Height
})
}
return changes, nil
}
func (repo *Pebble) DropChanges(name []byte, finalHeight int32) error {
changes, err := repo.LoadChanges(name)
if err != nil {
return errors.Wrapf(err, "in load changes for %s", name)
}
buffer := bytes.NewBuffer(nil)
for i := 0; i < len(changes); i++ { // assuming changes are ordered by height
if changes[i].Height > finalHeight {
break
}
if changes[i].VisibleHeight > finalHeight { // created after this height has to be skipped
continue
}
// having to sort the changes really messes up performance here. It would be better to not remarshal
err := changes[i].Marshal(buffer)
if err != nil {
return errors.Wrap(err, "in marshaller")
}
}
// making a performance assumption that DropChanges won't happen often:
err = repo.db.Set(name, buffer.Bytes(), pebble.NoSync)
return errors.Wrapf(err, "in set at %s", name)
}
func (repo *Pebble) IterateChildren(name []byte, f func(changes []change.Change) bool) error {
start := make([]byte, len(name)+1) // zeroes the last byte; would a constant len allow stack allocation?
copy(start, name)
end := make([]byte, len(name)) // max name length is 255
copy(end, name)
validEnd := false
for i := len(name) - 1; i >= 0; i-- {
end[i]++
if end[i] != 0 {
validEnd = true
break
}
}
if !validEnd {
end = nil // uh, we think this means run to the end of the table
}
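// e.g. name "ab" yields the bounds ["ab\x00", "ac"), covering every child of
// "ab" while excluding "ab" itself; a name of all 0xFF bytes has no upper bound.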
prefixIterOptions := &pebble.IterOptions{
LowerBound: start,
UpperBound: end,
}
iter := repo.db.NewIter(prefixIterOptions)
defer iter.Close()
for iter.First(); iter.Valid(); iter.Next() {
// NOTE! iter.Key() is ephemeral!
changes, err := unmarshalChanges(iter.Key(), iter.Value())
if err != nil {
return errors.Wrapf(err, "from unmarshaller at %s", iter.Key())
}
if !f(changes) {
break
}
}
return nil
}
func (repo *Pebble) IterateAll(predicate func(name []byte) bool) {
iter := repo.db.NewIter(nil)
defer iter.Close()
for iter.First(); iter.Valid(); iter.Next() {
if !predicate(iter.Key()) {
break
}
}
}
func (repo *Pebble) Close() error {
err := repo.db.Flush()
if err != nil {
// if we fail to close are we going to try again later?
return errors.Wrap(err, "on flush")
}
err = repo.db.Close()
return errors.Wrap(err, "on close")
}
func (repo *Pebble) Flush() error {
_, err := repo.db.AsyncFlush()
return err
}

View file

@ -0,0 +1,114 @@
package node
import (
"bytes"
"github.com/lbryio/lbcd/claimtrie/change"
"github.com/lbryio/lbcd/claimtrie/normalization"
"github.com/lbryio/lbcd/claimtrie/param"
)
type NormalizingManager struct { // implements Manager
Manager
normalizedAt int32
}
func NewNormalizingManager(baseManager Manager) Manager {
log.Info(normalization.NormalizeTitle)
return &NormalizingManager{
Manager: baseManager,
normalizedAt: -1,
}
}
func (nm *NormalizingManager) AppendChange(chg change.Change) {
chg.Name = normalization.NormalizeIfNecessary(chg.Name, chg.Height)
nm.Manager.AppendChange(chg)
}
func (nm *NormalizingManager) IncrementHeightTo(height int32, temporary bool) ([][]byte, error) {
nm.addNormalizationForkChangesIfNecessary(height)
return nm.Manager.IncrementHeightTo(height, temporary)
}
func (nm *NormalizingManager) DecrementHeightTo(affectedNames [][]byte, height int32) ([][]byte, error) {
if nm.normalizedAt > height {
nm.normalizedAt = -1
}
return nm.Manager.DecrementHeightTo(affectedNames, height)
}
func (nm *NormalizingManager) addNormalizationForkChangesIfNecessary(height int32) {
if nm.Manager.Height()+1 != height {
// initialization phase
if height >= param.ActiveParams.NormalizedNameForkHeight {
nm.normalizedAt = param.ActiveParams.NormalizedNameForkHeight // eh, we don't really know that it happened there
}
}
if nm.normalizedAt >= 0 || height != param.ActiveParams.NormalizedNameForkHeight {
return
}
nm.normalizedAt = height
log.Info("Generating necessary changes for the normalization fork...")
// the original code had an unfortunate bug where many unnecessary takeovers
// were triggered at the normalization fork
predicate := func(name []byte) bool {
norm := normalization.Normalize(name)
eq := bytes.Equal(name, norm)
if eq {
return true
}
clone := make([]byte, len(name))
copy(clone, name) // iteration name buffer is reused on future loops
// by loading changes for norm here, you can determine if there will be a conflict
n, err := nm.Manager.NodeAt(nm.Manager.Height(), clone)
if err != nil || n == nil {
return true
}
for _, c := range n.Claims {
nm.Manager.AppendChange(change.Change{
Type: change.AddClaim,
Name: norm,
Height: c.AcceptedAt,
OutPoint: c.OutPoint,
ClaimID: c.ClaimID,
Amount: c.Amount,
ActiveHeight: c.ActiveAt, // necessary to match the old hash
VisibleHeight: height, // necessary to match the old hash; it would have been much better without
})
nm.Manager.AppendChange(change.Change{
Type: change.SpendClaim,
Name: clone,
Height: height,
OutPoint: c.OutPoint,
})
}
for _, c := range n.Supports {
nm.Manager.AppendChange(change.Change{
Type: change.AddSupport,
Name: norm,
Height: c.AcceptedAt,
OutPoint: c.OutPoint,
ClaimID: c.ClaimID,
Amount: c.Amount,
ActiveHeight: c.ActiveAt,
VisibleHeight: height,
})
nm.Manager.AppendChange(change.Change{
Type: change.SpendSupport,
Name: clone,
Height: height,
OutPoint: c.OutPoint,
})
}
return true
}
nm.Manager.IterateNames(predicate)
}

31
claimtrie/node/repo.go Normal file
View file

@ -0,0 +1,31 @@
package node
import (
"github.com/lbryio/lbcd/claimtrie/change"
)
// Repo defines APIs for Node to access persistence layer.
type Repo interface {
// AppendChanges saves changes into the repo.
// The changes can belong to different nodes, but the chronological
// order must be preserved for the same node.
AppendChanges(changes []change.Change) error
// LoadChanges loads changes of a node up to (includes) the specified height.
// If no changes found, both returned slice and error will be nil.
LoadChanges(name []byte) ([]change.Change, error)
DropChanges(name []byte, finalHeight int32) error
// Close closes the repo.
Close() error
// IterateChildren calls f with the change set of each child of name
// (i.e. every key matching name.+).
// Return false from f to stop the iteration.
IterateChildren(name []byte, f func(changes []change.Change) bool) error
// IterateAll iterates keys until the predicate function returns false
IterateAll(predicate func(name []byte) bool)
Flush() error
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,61 @@
package normalization
import (
"bytes"
_ "embed"
"regexp"
"strconv"
"strings"
"unicode/utf8"
)
//go:embed CaseFolding_v11.txt
var v11 string
var foldMap map[rune][]rune
func init() {
foldMap = map[rune][]rune{}
r, _ := regexp.Compile(`([[:xdigit:]]+?); (.); ([[:xdigit:] ]+?);`)
matches := r.FindAllStringSubmatch(v11, -1) // -1 means no limit on the match count
for i := range matches {
if matches[i][2] == "C" || matches[i][2] == "F" {
key, err := strconv.ParseUint(matches[i][1], 16, len(matches[i][1])*4)
if err != nil {
panic(err)
}
splits := strings.Split(matches[i][3], " ")
var values []rune
for j := range splits {
value, err := strconv.ParseUint(splits[j], 16, len(splits[j])*4)
if err != nil {
panic(err)
}
values = append(values, rune(value))
}
foldMap[rune(key)] = values
}
}
}
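// A typical CaseFolding.txt line matched above looks like:
// 0041; C; 0061; # LATIN CAPITAL LETTER A
// i.e. source code point, status, mapping; only C (common) and F (full) rows are kept.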
func caseFold(name []byte) []byte {
var b bytes.Buffer
b.Grow(len(name))
for i := 0; i < len(name); {
r, w := utf8.DecodeRune(name[i:])
if r == utf8.RuneError && w < 2 {
// HACK: their RuneError is actually a valid character if coming from a width of 2 or more
return name
}
replacements := foldMap[r]
if len(replacements) > 0 {
for j := range replacements {
b.WriteRune(replacements[j])
}
} else {
b.WriteRune(r)
}
i += w
}
return b.Bytes()
}
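// For example, caseFold([]byte("ABC")) yields "abc", and a full (F) mapping may
// expand one rune into several: U+00DF folds to "ss".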

View file

@ -0,0 +1,177 @@
package normalization
import (
"bufio"
_ "embed"
"strconv"
"strings"
"unicode/utf8"
)
//go:embed NFC_v11.txt
var decompositions string // the data file that came from ICU 63.2
var nfdMap map[rune][]rune
var nfdOrder map[rune]int32
func init() {
nfdMap = map[rune][]rune{}
nfdOrder = map[rune]int32{}
scanner := bufio.NewScanner(strings.NewReader(decompositions))
for scanner.Scan() {
line := scanner.Text()
if len(line) <= 0 || line[0] == '#' || line[0] == '*' {
continue
}
if strings.Contains(line, ":") {
// it's an ordering def:
addOrdering(line)
continue
}
splits := strings.Split(line, "=")
if len(splits) <= 1 {
splits = strings.Split(line, ">")
if len(splits) <= 1 {
continue
}
}
key, err := strconv.ParseUint(splits[0], 16, len(splits[0])*4)
if err != nil {
panic(err)
}
splits = strings.Split(splits[1], " ")
values := make([]rune, 0, len(splits))
for j := range splits {
value, err := strconv.ParseUint(splits[j], 16, len(splits[j])*4)
if err != nil {
panic(err)
}
existing := nfdMap[rune(value)]
if len(existing) > 0 {
values = append(values, existing...)
} else {
values = append(values, rune(value))
}
}
nfdMap[rune(key)] = values
}
// run one more expansion pass to catch stragglers
for key, values := range nfdMap {
for i, value := range values {
other := nfdMap[value]
if len(other) > 0 {
newValues := make([]rune, len(values)+len(other)-1)
copy(newValues, values[:i])
copy(newValues[i:i+len(other)], other)
copy(newValues[i+len(other):], values[i+1:])
nfdMap[key] = newValues
}
}
}
// assert no more expansions are necessary:
for _, values := range nfdMap {
for _, value := range values {
other := nfdMap[value]
if len(other) > 0 {
panic("Failed in NFD expansion")
}
}
}
}
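
// addOrdering parses an ordering line such as "0300..0314:230": a hex
// code point (or ".."-delimited range) before the colon, and its
// canonical combining class value after it.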
func addOrdering(line string) {
splits := strings.Split(line, ":")
ranges := strings.Split(splits[0], "..")
value, err := strconv.ParseUint(splits[1], 16, len(splits[1])*4)
if err != nil {
panic(err)
}
start, err := strconv.ParseUint(ranges[0], 16, len(ranges[0])*4)
if err != nil {
panic(err)
}
end := start
if len(ranges) > 1 {
end, err = strconv.ParseUint(ranges[1], 16, len(ranges[1])*4)
if err != nil {
panic(err)
}
}
for i := start; i <= end; i++ {
nfdOrder[rune(i)] = int32(value)
}
}

func decompose(name []byte) []byte {
// see https://unicode.org/reports/tr15/ section 1.3
runes := make([]rune, 0, len(name)) // names are typically ASCII, so don't grow the allocation up front
for i := 0; i < len(name); {
r, w := utf8.DecodeRune(name[i:])
if r == utf8.RuneError && w < 2 {
// HACK: their RuneError is actually a valid character if coming from a width of 2 or more
return name
}
replacements := nfdMap[r]
if len(replacements) > 0 {
runes = append(runes, replacements...)
} else {
hanguls := decomposeHangul(r)
if len(hanguls) > 0 {
runes = append(runes, hanguls...)
} else {
runes = append(runes, r)
}
}
i += w
}
repairOrdering(runes)
return []byte(string(runes))
}
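
// decomposeHangul algorithmically decomposes a precomposed Hangul
// syllable per the Unicode standard. Worked example: U+AC01 has
// SIndex = 1, so L = 0x1100 + 1/588, V = 0x1161 + (1%588)/28, and
// T = 0x11A7 + 1%28, giving {U+1100, U+1161, U+11A8}.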
func decomposeHangul(s rune) []rune {
// see https://www.unicode.org/versions/Unicode11.0.0/ch03.pdf
const SBase int32 = 0xAC00
const LBase int32 = 0x1100
const VBase int32 = 0x1161
const TBase int32 = 0x11A7
const LCount int32 = 19
const VCount int32 = 21
const TCount int32 = 28
const NCount = VCount * TCount // 588
const SCount = LCount * NCount // 11172
SIndex := s - SBase
if SIndex < 0 || SIndex >= SCount {
return nil
}
L := LBase + SIndex/NCount
V := VBase + (SIndex%NCount)/TCount
T := TBase + SIndex%TCount
result := []rune{L, V}
if T != TBase {
result = append(result, T)
}
return result
}
func repairOrdering(runes []rune) {
for i := 1; i < len(runes); i++ {
a := runes[i-1]
b := runes[i]
oa := nfdOrder[a]
ob := nfdOrder[b]
if oa > ob && ob > 0 {
runes[i-1], runes[i] = b, a
if i >= 2 {
i -= 2
} else {
i = 0
}
}
}
}
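
As a concrete check, the ohm sign U+2126 canonically decomposes to the Greek capital omega U+03A9 (the normalization test further below then case-folds it down to U+03C9); a minimal sketch:

    out := decompose([]byte("\xE2\x84\xA6")) // U+2126 OHM SIGN
    fmt.Printf("%X\n", out)                  // "CEA9", i.e. U+03A9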

View file

@ -0,0 +1,22 @@
package normalization

import (
"github.com/lbryio/lbcd/claimtrie/param"
)

var Normalize = normalizeGo
var NormalizeTitle = "Normalizing strings via Go. Casefold and NFD table versions: 11.0.0 (from ICU 63.2)"

func NormalizeIfNecessary(name []byte, height int32) []byte {
if height < param.ActiveParams.NormalizedNameForkHeight {
return name
}
return Normalize(name)
}

func normalizeGo(value []byte) []byte {
normalized := decompose(value) // may need to hard-code the version on this
// not using x/text/cases because it does too good a job; it seems to use v14 tables even when it claims v13
return caseFold(normalized)
}
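
The height gate is what keeps this consensus-safe: names created before the normalization fork are compared byte-for-byte. A small sketch of the expected behavior:

    name := []byte("TEST")
    _ = NormalizeIfNecessary(name, param.ActiveParams.NormalizedNameForkHeight-1) // "TEST", unchanged
    _ = NormalizeIfNecessary(name, param.ActiveParams.NormalizedNameForkHeight)   // "test"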

View file

@ -0,0 +1,77 @@
//go:build use_icu_normalization
// +build use_icu_normalization

package normalization

// #cgo CFLAGS: -O2
// #cgo LDFLAGS: -licuio -licui18n -licuuc -licudata
// #include <unicode/unorm2.h>
// #include <unicode/ustring.h>
// #include <unicode/uversion.h>
// int icu_version() {
// UVersionInfo info;
// u_getVersion(info);
// return ((int)(info[0]) << 16) + info[1];
// }
// int normalize(char* name, int length, char* result) {
// UErrorCode ec = U_ZERO_ERROR;
// static const UNormalizer2* normalizer = NULL;
// if (normalizer == NULL) normalizer = unorm2_getNFDInstance(&ec);
// UChar dest[256]; // maximum claim name size is 255; we won't have more UTF16 chars than bytes
// int dest_len;
// u_strFromUTF8(dest, 256, &dest_len, name, length, &ec);
// if (U_FAILURE(ec) || dest_len == 0) return 0;
// UChar normalized[256];
// dest_len = unorm2_normalize(normalizer, dest, dest_len, normalized, 256, &ec);
// if (U_FAILURE(ec) || dest_len == 0) return 0;
// dest_len = u_strFoldCase(dest, 256, normalized, dest_len, U_FOLD_CASE_DEFAULT, &ec);
// if (U_FAILURE(ec) || dest_len == 0) return 0;
// u_strToUTF8(result, 512, &dest_len, dest, dest_len, &ec);
// return dest_len;
// }
import "C"
import (
"bytes"
"encoding/hex"
"fmt"
"unsafe"
)
func init() {
Normalize = normalizeICU
NormalizeTitle = "Normalizing strings via ICU. ICU version = " + IcuVersion()
}
func IcuVersion() string {
// TODO: we probably need to explode if it's not 63.2 as it affects consensus
result := C.icu_version()
return fmt.Sprintf("%d.%d", result>>16, result&0xffff)
}
func normalizeICU(value []byte) []byte {
original := value
if len(value) <= 0 {
return value
}
other := normalizeGo(value)
name := (*C.char)(unsafe.Pointer(&value[0]))
length := C.int(len(value))
// hopefully this is a stack alloc (but it may be a bit large for that):
var resultName [512]byte // inputs are restricted to 255 chars; it shouldn't expand too much past that
pointer := unsafe.Pointer(&resultName[0])
resultLength := C.normalize(name, length, (*C.char)(pointer))
if resultLength > 0 {
value = C.GoBytes(pointer, resultLength)
}
// return resultName[0:resultLength] -- we want to shrink the allocation (not keep a slice over the full 512-byte array)
if !bytes.Equal(other, value) {
fmt.Printf("Failed with %s, %s != %s,\n\t%s, %s != %s,\n", original, value, other,
hex.EncodeToString(original), hex.EncodeToString(value), hex.EncodeToString(other))
}
return value
}
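
This ICU-backed variant is compiled only when the use_icu_normalization build tag is set (e.g. go test -tags use_icu_normalization on this package). Note that normalizeICU also runs normalizeGo on every input and prints any mismatch, so the pure-Go path doubles as a live cross-check of consensus behavior.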

View file

@ -0,0 +1,74 @@
//go:build use_icu_normalization
// +build use_icu_normalization

package normalization

import (
"bytes"
"encoding/hex"
"testing"
"unicode/utf8"

"github.com/stretchr/testify/assert"
)
func TestNormalizationICU(t *testing.T) {
testNormalization(t, normalizeICU)
}
func BenchmarkNormalizeICU(b *testing.B) {
benchmarkNormalize(b, normalizeICU)
}
var testStrings = []string{
"Les-Masques-Blancs-Die-Dead-place-Sathonay-28-Août",
"Bez-komentu-výbuch-z-vnútra,-radšej-pozri-video...-",
"၂-နစ်အကြာမှာ",
"ငရဲပြည်မှ-6",
"@happyvision",
"ကမ္ဘာပျက်ကိန်း-9",
"ဝိညာဉ်နား၊-3",
"un-amore-nuovo-o-un-ritorno-cosa-mi-dona",
"è-innamorato-di-me-anche-se-non-lo-dice",
"ပြင်ဆင်ပါ-no.1",
"ပြင်ဆင်ပါ-no.4",
"ပြင်ဆင်ပါ-no.2",
"ပြင်ဆင်ပါ-no.3",
"ငရဲပြည်မှ-5",
"ပြင်ဆင်ပါ-no.6",
"ပြင်ဆင်ပါ-no.5",
"ပြင်ဆင်ပါ-no.7",
"ပြင်ဆင်ပါ-no.8",
"အချိန်-2",
"ဝိညာဉ်နား၊-4",
"ပြင်ဆင်ပါ-no.-13",
"ပြင်ဆင်ပါ-no.15",
"ပြင်ဆင်ပါ-9",
"schilddrüsenhormonsubstitution-nach",
"Linxextremismus-JPzuG_UBtEg",
"Ꮖ---N---------N-Ꮹ----on-Instagram_-“Our-next-destination-is-East-and-Southeast-Asia--selfie--asia”",
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
}
func TestBlock760150_1020105(t *testing.T) {
test, _ := hex.DecodeString("43efbfbd")
assert.True(t, utf8.Valid(test))
a := normalizeGo(test)
b := normalizeICU(test)
assert.Equal(t, a, b)
for i, s := range testStrings {
a = normalizeGo([]byte(s))
b = normalizeICU([]byte(s))
assert.Equal(t, a, b, "%d: %s != %s", i, string(a), string(b))
// t.Logf("%s -> %s", s, string(b))
}
}
func TestBlock1085612(t *testing.T) {
s, err := hex.DecodeString("6eccb7cd9dcc92cd90cc86cc80cc80cd91cd9dcd8acd80cd92cc94cc85cc8fccbdcda0ccbdcd80cda0cd84cc94cc8ccc9acd84cc94cd9bcda0cca7cc99ccaccd99cca9cca7")
assert.NoError(t, err)
a := normalizeICU(s)
b := normalizeGo(s)
assert.Equal(t, a, b, "%s != %s, %v", string(a), string(b), bytes.Equal(b, s))
}

View file

@ -0,0 +1,89 @@
package normalization

import (
"bufio"
"bytes"
_ "embed"
"math/rand"
"strconv"
"strings"
"testing"

"github.com/stretchr/testify/require"
)
func TestNormalizationGo(t *testing.T) {
testNormalization(t, normalizeGo)
}
func testNormalization(t *testing.T, normalize func(value []byte) []byte) {
r := require.New(t)
r.Equal("test", string(normalize([]byte("TESt"))))
r.Equal("test 23", string(normalize([]byte("tesT 23"))))
r.Equal("\xFF", string(normalize([]byte("\xFF"))))
r.Equal("\xC3\x28", string(normalize([]byte("\xC3\x28"))))
r.Equal("\xCF\x89", string(normalize([]byte("\xE2\x84\xA6"))))
r.Equal("\xD1\x84", string(normalize([]byte("\xD0\xA4"))))
r.Equal("\xD5\xA2", string(normalize([]byte("\xD4\xB2"))))
r.Equal("\xE3\x81\xB5\xE3\x82\x99", string(normalize([]byte("\xE3\x81\xB6"))))
r.Equal("\xE1\x84\x81\xE1\x85\xAA\xE1\x86\xB0", string(normalize([]byte("\xEA\xBD\x91"))))
}
func randSeq(n int) []byte {
var alphabet = []rune("abcdefghijklmnopqrstuvwxyz̃ABCDEFGHIJKLMNOPQRSTUVWXYZ̃")
b := make([]rune, n)
for i := range b {
b[i] = alphabet[rand.Intn(len(alphabet))]
}
return []byte(string(b))
}
func BenchmarkNormalize(b *testing.B) {
benchmarkNormalize(b, normalizeGo)
}
func benchmarkNormalize(b *testing.B, normalize func(value []byte) []byte) {
rand.Seed(42)
strings := make([][]byte, b.N)
for i := 0; i < b.N; i++ {
strings[i] = randSeq(32)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
s := normalize(strings[i])
require.True(b, len(s) >= 8)
}
}
//go:embed NormalizationTest_v11.txt
var nfdTests string
func TestDecomposition(t *testing.T) {
r := require.New(t)
scanner := bufio.NewScanner(strings.NewReader(nfdTests))
for scanner.Scan() {
line := scanner.Text()
if len(line) <= 0 || line[0] == '@' || line[0] == '#' {
continue
}
splits := strings.Split(line, ";")
source := convertToBytes(splits[0])
targetNFD := convertToBytes(splits[2])
fixed := decompose(source)
r.True(bytes.Equal(targetNFD, fixed), "Failed on %s -> %s. Got %U, not %U", splits[0], splits[2], fixed, targetNFD)
}
}
func convertToBytes(s string) []byte {
splits := strings.Split(s, " ")
var b bytes.Buffer
for i := range splits {
value, _ := strconv.ParseUint(splits[i], 16, len(splits[i])*4)
b.WriteRune(rune(value))
}
return b.Bytes()
}

285
claimtrie/param/delays.go Normal file
View file

@ -0,0 +1,285 @@
package param

var DelayWorkarounds = generateDelayWorkarounds() // called "removal workarounds" in previous versions

func generateDelayWorkarounds() map[string][]int32 {
return map[string][]int32{
"travtest01": {426898},
"gauntlet-invade-the-darkness-lvl-1-of": {583305},
"fr-let-s-play-software-inc-jay": {588308},
"fr-motorsport-manager-jay-s-racing": {588308},
"fr-crusader-kings-2-la-dynastie-6": {588318},
"fr-jurassic-world-evolution-let-s-play": {588318},
"calling-tech-support-scammers-live-3": {588683, 646584},
"let-s-play-jackbox-games": {589013},
"lets-play-jackbox-games-5": {589013},
"kabutothesnake-s-live-ps4-broadcast": {589538},
"no-eas-strong-thunderstorm-advisory": {589554},
"geometry-dash-level-requests": {589564},
"geometry-dash-level-requests-2": {589564},
"star-ocean-integrity-and-faithlessness": {589609},
"@pop": {589613},
"ullash": {589630},
"today-s-professionals-2018-winter-3": {589640},
"today-s-professionals-2018-winter-4": {589640},
"today-s-professionals-2018-winter-10": {589641},
"today-s-professionals-big-brother-6-13": {589641},
"today-s-professionals-big-brother-6-14": {589641},
"today-s-professionals-big-brother-6-26": {589641},
"today-s-professionals-big-brother-6-27": {589641},
"today-s-professionals-big-brother-6-28": {589641},
"today-s-professionals-big-brother-6-29": {589641},
"dark-souls-iii": {589697},
"bobby-blades": {589760},
"adrian": {589803},
"roblox-2": {589803, 597925},
"roblox-4": {589803},
"roblox-5": {589803},
"roblox-6": {589803},
"roblox-7": {589803},
"roblox-8": {589803},
"madden-17": {589809},
"madden-18-franchise": {589810},
"fifa-14-android-astrodude44-vs": {589831},
"gaming-with-silverwolf-live-stream-3": {589849},
"gaming-with-silverwolf-live-stream-4": {589849},
"gaming-with-silverwolf-live-stream-5": {589849},
"gaming-with-silverwolf-videos-live": {589849},
"gaming-with-silverwolf-live-stream-6": {589851},
"live-q-a": {589851},
"classic-sonic-games": {589870},
"gta": {589926},
"j-dog7973-s-fortnite-squad": {589926},
"wow-warlords-of-draenor-horde-side": {589967},
"minecraft-ps4-hardcore-survival-2-the-5": {589991},
"happy-new-year-2017": {590013},
"come-chill-with-rekzzey-2": {590020},
"counter-strike-global-offensive-funny": {590031},
"father-vs-son-stickfight-stickfight": {590178},
"little-t-playing-subnautica-livestream": {590178},
"today-s-professionals-big-brother-7-26-5": {590200},
"50585be4e3159a7-1": {590206},
"dark-souls-iii-soul-level-1-challenge": {590223},
"dark-souls-iii-soul-level-1-challenge-3": {590223},
"let-s-play-sniper-elite-4-authentic-2": {590225},
"skyrim-special-edition-ps4-platinum-4": {590225},
"let-s-play-final-fantasy-the-zodiac-2": {590226},
"let-s-play-final-fantasy-the-zodiac-3": {590226},
"ls-h-ppchen-halloween-stream-vom-31-10": {590401},
"a-new-stream": {590669},
"danganronpa-v3-killing-harmony-episode": {590708},
"danganronpa-v3-killing-harmony-episode-4": {590708},
"danganronpa-v3-killing-harmony-episode-6": {590708},
"danganronpa-v3-killing-harmony-episode-8": {590708},
"danganronpa-v3-killing-harmony-episode-9": {590708},
"call-of-duty-infinite-warfare-gameplay-2": {591982},
"destiny-the-taken-king-gameplay": {591982},
"horizon-zero-dawn-100-complete-4": {591983},
"ghost-recon-wildlands-100-complete-4": {591984},
"nier-automata-100-complete-gameplay-25": {591985},
"frustrert": {592291},
"call-of-duty-black-ops-3-multiplayer": {593504},
"rayman-legends-challenges-app-the": {593551},
"super-mario-sunshine-3-player-race-2": {593552},
"some-new-stuff-might-play-a-game": {593698},
"memory-techniques-1-000-people-system": {595537},
"propresenter-6-tutorials-new-features-4": {595559},
"rocket-league-live": {595559},
"fortnite-battle-royale": {595818},
"fortnite-battle-royale-2": {595818},
"ohare12345-s-live-ps4-broadcast": {595818},
"super-smash-bros-u-home-run-contest-13": {595838},
"super-smash-bros-u-home-run-contest-15": {595838},
"super-smash-bros-u-home-run-contest-2": {595838, 595844},
"super-smash-bros-u-home-run-contest-22": {595838, 595845},
"super-smash-bros-u-multi-man-smash-3": {595838},
"minecraft-survival-biedronka-i-czarny-2": {596828},
"gramy-minecraft-jasmc-pl": {596829},
"farcry-5-gameplay": {595818},
"my-channel-trailer": {595818},
"full-song-production-tutorial-aeternum": {596934},
"blackboxglobalreview-hd": {597091},
"tom-clancy-s-rainbow-six-siege": {597633},
"5-new-technology-innovations-in-5": {597635},
"5-new-technology-innovations-in-5-2": {597635},
"how-to-play-nothing-else-matters-on": {597637},
"rb6": {597639},
"borderlands-2-tiny-tina-s-assault-on": {597658},
"let-s-play-borderlands-the-pre-sequel": {597658},
"caveman-world-mountains-of-unga-boonga": {597660},
"for-honor-ps4-2": {597706},
"fortnite-episode-1": {597728},
"300-subscribers": {597750},
"viscera-cleanup-detail-santa-s-rampage": {597755},
"infinite-voxel-terrain-in-unity-update": {597777},
"let-s-play-pok-mon-light-platinum": {597783},
"video-2": {597785},
"video-8": {597785},
"finally": {597793},
"let-s-play-mario-party-luigi-s-engine": {597796},
"my-edited-video": {597799},
"we-need-to-talk": {597800},
"tf2-stream-2": {597811},
"royal-thumble-tuesday-night-thumbdown": {597814},
"beat-it-michael-jackson-cover": {597815},
"black-ops-3": {597816},
"call-of-duty-black-ops-3-campaign": {597819},
"skyrim-special-edition-silent-2": {597822},
"the-chainsmokers-everybody-hates-me": {597823},
"experiment-glowing-1000-degree-knife-vs": {597824},
"l1011widebody-friends-let-s-play-2": {597824},
"call-of-duty-black-ops-4": {597825},
"let-s-play-fallout-2-restoration-3": {597825},
"let-s-play-fallout-2-restoration-19": {597826},
"let-s-play-fallout-2-restoration-27": {597826},
"2015": {597828},
"payeer": {597829},
"youtube-3": {597829},
"bitcoin-5": {597830},
"2016": {597831},
"bitcoin-2": {597831},
"dreamtowards": {597831},
"surfearner": {597831},
"100-000": {597832},
"20000": {597833},
"remme": {597833},
"hycon": {597834},
"robocraft": {597834},
"saturday-night-baseball-with-37": {597834},
"let-s-play-command-conquer-red-alert-9": {597835},
"15-curiosidades-que-probablemente-ya": {597837},
"elder-scrolls-online-road-to-level-20": {597893},
"playerunknown-s-battlegrounds": {597894},
"black-ops-3-fun": {597897},
"mortal-kombat-xl-the-funniest": {597899},
"try-not-to-laugh-2": {597899},
"call-of-duty-advanced-warfare-domination": {597898},
"my-live-stream-with-du-recorder-5": {597900},
"ls-h-ppchen-halloween-stream-vom-31-10-2": {597904},
"ls-h-ppchen-halloween-stream-vom-31-10-3": {597904},
"how-it-feels-to-chew-5-gum-funny-8": {597905},
"live-stream-mu-club-america-3": {597918},
"black-death": {597927},
"lets-play-spore-with-3": {597929},
"true-mov-2": {597933},
"fortnite-w-pat-the-rat-pat-the-rat": {597935},
"jugando-pokemon-esmeralda-gba": {597935},
"talking-about-my-channel-and-much-more-4": {597936},
"-14": {597939},
"-15": {597939},
"-16": {597939},
"-17": {597939},
"-18": {597939},
"-20": {597939},
"-21": {597939},
"-24": {597939},
"-25": {597939},
"-26": {597939},
"-27": {597939},
"-28": {597939},
"-29": {597939},
"-31": {597941},
"-34": {597941},
"-6": {597939},
"-7": {597939},
"10-4": {612097},
"10-6": {612097},
"10-7": {612097},
"10-diy": {612097},
"10-twitch": {612097},
"100-5": {597909},
"189f2f04a378c02-1": {612097},
"2011-2": {597917},
"2011-3": {597917},
"2c61c818687ed09-1": {612097},
"5-diy-4": {612097},
"@andymcdandycdn": {640212},
"@lividjava": {651654},
"@mhx": {653957},
"@tipwhatyoulike": {599792},
"@wibbels": {612195},
"@yisraeldov": {647416},
"beyaz-hap-biseks-el-evlat": {657957},
"bilgisayar-al-t-rma-s-recinde-ya-ananlar": {657957},
"brave-como-ganhar-dinheiro-todos-os-dias": {598494},
"c81e728d9d4c2f6-1": {598178},
"call-of-duty-world-war-2": {597935},
"chain-reaction": {597940},
"commodore-64-an-lar-ve-oyunlar": {657957},
"counter-strike-global-offensive-gameplay": {597900},
"dead-island-riptide-co-op-walkthrough-2": {597904, 598105},
"diy-10": {612097},
"diy-11": {612097},
"diy-13": {612097},
"diy-14": {612097},
"diy-19": {612097},
"diy-4": {612097},
"diy-6": {612097},
"diy-7": {612097},
"diy-9": {612097},
"doktor-ve-patron-sahnesinin-haz-rl-k-ve": {657957},
"eat-the-street": {597910},
"fallout-4-modded": {597901},
"fallout-4-walkthrough": {597900},
"filmli-efecast-129-film-inde-film-inde": {657957},
"filmli-efecast-130-ger-ek-hayatta-anime": {657957},
"filmli-efecast-97-netflix-filmi-form-l": {657957},
"for-honor-2": {597932},
"for-honor-4": {597932},
"gta-5": {597902},
"gta-5-2": {597902},
"helldriver-g-n-n-ekstrem-filmi": {657957},
"hi-4": {597933},
"hi-5": {597933},
"hi-7": {597933},
"kizoa-movie-video-slideshow-maker": {597900, 597932},
"l1011widebody-friends-let-s-play-3": {598070},
"lbry": {608276},
"lets-play-spore-with": {597930},
"madants": {625032},
"mechwarrior-2-soundtrack-clan-jade": {598070},
"milo-forbidden-conversation": {655173},
"mobile-record": {597910},
"mouths": {607379},
"mp-aleyna-tilki-nin-zorla-seyrettirilen": {657957},
"mp-atat-rk-e-eytan-diyen-yunan-as-ll": {657957},
"mp-bah-eli-calan-avukatlar-yla-g-r-s-n": {657957},
"mp-bu-podcast-babalar-in": {657957},
"mp-bu-podcasti-akp-li-tan-d-klar-n-za": {657957},
"mp-gaziantep-te-tacizle-su-lan-p-dayak": {650409},
"mp-hatipo-lu-nun-ermeni-bir-ocu-u-canl": {657957},
"mp-k-rt-annelerin-hdp-ye-tepkisi": {657957},
"mp-kenan-sofuo-lu-nun-mamo-lu-na-destek": {657957},
"mp-mamo-lu-nun-muhafazakar-g-r-nmesi": {657957},
"mp-mhp-akp-gerginli-i": {657957},
"mp-otob-ste-t-rkle-meyin-diye-ba-ran-svi": {657957},
"mp-pace-i-kazand-m-diyip-21-bin-dolar": {657957},
"mp-rusya-da-kad-nlara-tecav-zc-s-n-ld": {657957},
"mp-s-n-rs-z-nafakan-n-kalkmas-adil-mi": {657957},
"mp-susamam-ark-s-ve-serkan-nci-nin-ark": {657957},
"mp-y-lmaz-zdil-in-kitap-paralar-yla-yard": {657957},
"mp-yang-n-u-aklar-pahal-diyen-orman": {657957},
"mp-yeni-zelanda-katliam-ndan-siyasi-rant": {657957},
"my-edited-video-4": {597932},
"my-live-stream-with-du-recorder": {597900},
"my-live-stream-with-du-recorder-3": {597900},
"new-channel-intro": {598235},
"paladins-3": {597900},
"popstar-sahnesi-kamera-arkas-g-r-nt-leri": {657957},
"retro-bilgisayar-bulu-mas": {657957},
"scp-t-rk-e-scp-002-canl-oda": {657957},
"steep": {597900},
"stephen-hicks-postmodernism-reprise": {655173},
"super-smash-bros-u-brawl-co-op-event": {595841},
"super-smash-bros-u-super-mario-u-smash": {595839},
"super-smash-bros-u-zelda-smash-series": {595841},
"superonline-fiber-den-efsane-kaz-k-yedim": {657957},
"talking-about-my-channel-and-much-more-5": {597936},
"test1337reflector356": {627814},
"the-last-of-us-remastered-2": {597915},
"tom-clancy-s-ghost-recon-wildlands-2": {597916},
"tom-clancy-s-rainbow-six-siege-3": {597935},
"wwe-2k18-with-that-guy-and-tricky": {597901},
"yay-nc-bob-afet-kamera-arkas": {657957},
}
}
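
The map is keyed by claim name, and the values are the block heights at which historical implementations applied a different activation delay. A minimal sketch of a lookup (hypothetical helper; the real consumer is the node manager, presumably bounded by MaxRemovalWorkaroundHeight):

    func hasDelayWorkaround(name []byte, height int32) bool {
        for _, h := range DelayWorkarounds[string(name)] {
            if h == height {
                return true
            }
        }
        return false
    }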

View file

@ -0,0 +1,74 @@
package param

import "github.com/lbryio/lbcd/wire"

type ClaimTrieParams struct {
MaxActiveDelay int32
ActiveDelayFactor int32
MaxNodeManagerCacheSize int
OriginalClaimExpirationTime int32
ExtendedClaimExpirationTime int32
ExtendedClaimExpirationForkHeight int32
MaxRemovalWorkaroundHeight int32
NormalizedNameForkHeight int32
AllClaimsInMerkleForkHeight int32
}
var (
ActiveParams = MainNet
MainNet = ClaimTrieParams{
MaxActiveDelay: 4032,
ActiveDelayFactor: 32,
MaxNodeManagerCacheSize: 32000,
OriginalClaimExpirationTime: 262974,
ExtendedClaimExpirationTime: 2102400,
ExtendedClaimExpirationForkHeight: 400155, // https://lbry.io/news/hf1807
MaxRemovalWorkaroundHeight: 658300,
NormalizedNameForkHeight: 539940, // targeting 21 March 2019, https://lbry.com/news/hf1903
AllClaimsInMerkleForkHeight: 658309, // targeting 30 Oct 2019, https://lbry.com/news/hf1910
}
TestNet = ClaimTrieParams{
MaxActiveDelay: 4032,
ActiveDelayFactor: 32,
MaxNodeManagerCacheSize: 32000,
OriginalClaimExpirationTime: 262974,
ExtendedClaimExpirationTime: 2102400,
ExtendedClaimExpirationForkHeight: 278160,
MaxRemovalWorkaroundHeight: 1, // if you get a hash mismatch, come back to this
NormalizedNameForkHeight: 993380,
AllClaimsInMerkleForkHeight: 1198559,
}
Regtest = ClaimTrieParams{
MaxActiveDelay: 4032,
ActiveDelayFactor: 32,
MaxNodeManagerCacheSize: 32000,
OriginalClaimExpirationTime: 500,
ExtendedClaimExpirationTime: 600,
ExtendedClaimExpirationForkHeight: 800,
MaxRemovalWorkaroundHeight: -1,
NormalizedNameForkHeight: 250,
AllClaimsInMerkleForkHeight: 349,
}
)
func SetNetwork(net wire.BitcoinNet) {
switch net {
case wire.MainNet:
ActiveParams = MainNet
case wire.TestNet3:
ActiveParams = TestNet
case wire.TestNet, wire.SimNet: // "regtest"
ActiveParams = Regtest
}
}
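
A minimal wiring sketch (call site hypothetical): during startup, before any claimtrie state is built, the node selects the parameters for the chain it is on:

    // e.g. from server initialization code (hypothetical location)
    param.SetNetwork(wire.TestNet3) // ActiveParams now points at TestNet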

View file

@ -0,0 +1,451 @@
package param

var TakeoverWorkarounds = generateTakeoverWorkarounds()

func generateTakeoverWorkarounds() map[string]int { // TODO: the values here are unused; bools would probably be better
return map[string]int{
"496856_HunterxHunterAMV": 496835,
"542978_namethattune1": 542429,
"543508_namethattune-5": 543306,
"546780_forecasts": 546624,
"548730_forecasts": 546780,
"551540_forecasts": 548730,
"552380_chicthinkingofyou": 550804,
"560363_takephotowithlbryteam": 559962,
"563710_test-img": 563700,
"566750_itila": 543261,
"567082_malabarismo-com-bolas-de-futebol-vs-chap": 563592,
"596860_180mphpullsthrougheurope": 596757,
"617743_vaccines": 572756,
"619609_copface-slamshandcuffedteengirlintoconcrete": 539940,
"620392_banker-exposes-satanic-elite": 597788,
"624997_direttiva-sulle-armi-ue-in-svizzera-di": 567908,
"624997_best-of-apex": 585580,
"629970_cannot-ignore-my-veins": 629914,
"633058_bio-waste-we-programmed-your-brain": 617185,
"633601_macrolauncher-overview-first-look": 633058,
"640186_its-up-to-you-and-i-2019": 639116,
"640241_tor-eas-3-20": 592645,
"640522_seadoxdark": 619531,
"640617_lbry-przewodnik-1-instalacja": 451186,
"640623_avxchange-2019-the-next-netflix-spotify": 606790,
"640684_algebra-introduction": 624152,
"640684_a-high-school-math-teacher-does-a": 600885,
"640684_another-random-life-update": 600884,
"640684_who-is-the-taylor-series-for": 600882,
"640684_tedx-talk-released": 612303,
"640730_e-mental": 615375,
"641143_amiga-1200-bespoke-virgin-cinema": 623542,
"641161_dreamscape-432-omega": 618894,
"641162_2019-topstone-carbon-force-etap-axs-bike": 639107,
"641186_arin-sings-big-floppy-penis-live-jazz-2": 638904,
"641421_edward-snowden-on-bitcoin-and-privacy": 522729,
"641421_what-is-libra-facebook-s-new": 598236,
"641421_what-are-stablecoins-counter-party-risk": 583508,
"641421_anthony-pomp-pompliano-discusses-crypto": 564416,
"641421_tim-draper-crypto-invest-summit-2019": 550329,
"641421_mass-adoption-and-what-will-it-take-to": 549781,
"641421_dragonwolftech-youtube-channel-trailer": 567128,
"641421_naomi-brockwell-s-weekly-crypto-recap": 540006,
"641421_blockchain-based-youtube-twitter": 580809,
"641421_andreas-antonopoulos-on-privacy-privacy": 533522,
"641817_mexico-submits-and-big-tech-worsens": 582977,
"641817_why-we-need-travel-bans": 581354,
"641880_censored-by-patreon-bitchute-shares": 482460,
"641880_crypto-wonderland": 485218,
"642168_1-diabolo-julio-cezar-16-cbmcp-freestyle": 374999,
"642314_tough-students": 615780,
"642697_gamercauldronep2": 642153,
"643406_the-most-fun-i-ve-had-in-a-long-time": 616506,
"643893_spitshine69-and-uk-freedom-audits": 616876,
"644480_my-mum-getting-attacked-a-duck": 567624,
"644486_the-cryptocurrency-experiment": 569189,
"644486_tag-you-re-it": 558316,
"644486_orange-county-mineral-society-rock-and": 397138,
"644486_sampling-with-the-gold-rush-nugget": 527960,
"644562_september-15-21-a-new-way-of-doing": 634792,
"644562_july-week-3-collective-frequency-general": 607942,
"644562_september-8-14-growing-up-general": 630977,
"644562_august-4-10-collective-frequency-general": 612307,
"644562_august-11-17-collective-frequency": 617279,
"644562_september-1-7-gentle-wake-up-call": 627104,
"644607_no-more-lol": 643497,
"644607_minion-masters-who-knew": 641313,
"645236_danganronpa-3-the-end-of-hope-s-peak": 644153,
"645348_captchabot-a-discord-bot-to-protect-your": 592810,
"645701_the-xero-hour-saint-greta-of-thunberg": 644081,
"645701_batman-v-superman-theological-notions": 590189,
"645918_emacs-is-great-ep-0-init-el-from-org": 575666,
"645918_emacs-is-great-ep-1-packages": 575666,
"645918_emacs-is-great-ep-40-pt-2-hebrew": 575668,
"645923_nasal-snuff-review-osp-batch-2": 575658,
"645923_why-bit-coin": 575658,
"645929_begin-quest": 598822,
"645929_filthy-foe": 588386,
"645929_unsanitary-snow": 588386,
"645929_famispam-1-music-box": 588386,
"645929_running-away": 598822,
"645931_my-beloved-chris-madsen": 589114,
"645931_space-is-consciousness-chris-madsen": 589116,
"645947_gasifier-rocket-stove-secondary-burn": 590595,
"645949_mouse-razer-abyssus-v2-e-mousepad": 591139,
"645949_pr-temporada-2018-league-of-legends": 591138,
"645949_windows-10-build-9901-pt-br": 591137,
"645949_abrindo-pacotes-do-festival-lunar-2018": 591139,
"645949_unboxing-camisetas-personalizadas-play-e": 591138,
"645949_abrindo-envelopes-do-festival-lunar-2017": 591138,
"645951_grub-my-grub-played-guruku-tersayang": 618033,
"645951_ismeeltimepiece": 618038,
"645951_thoughts-on-doom": 596485,
"645951_thoughts-on-god-of-war-about-as-deep-as": 596485,
"645956_linux-lite-3-6-see-what-s-new": 645195,
"646191_kahlil-gibran-the-prophet-part-1": 597637,
"646551_crypto-market-crash-should-you-sell-your": 442613,
"646551_live-crypto-trading-and-market-analysis": 442615,
"646551_5-reasons-trading-is-always-better-than": 500850,
"646551_digitex-futures-dump-panic-selling-or": 568065,
"646552_how-to-install-polarr-on-kali-linux-bynp": 466235,
"646586_electoral-college-kids-civics-lesson": 430818,
"646602_grapes-full-90-minute-watercolour": 537108,
"646602_meizu-mx4-the-second-ubuntu-phone": 537109,
"646609_how-to-set-up-the-ledger-nano-x": 569992,
"646609_how-to-buy-ethereum": 482354,
"646609_how-to-install-setup-the-exodus-multi": 482356,
"646609_how-to-manage-your-passwords-using": 531987,
"646609_cryptodad-s-live-q-a-friday-may-3rd-2019": 562303,
"646638_resident-evil-ada-chapter-5-final": 605612,
"646639_taurus-june-2019-career-love-tarot": 586910,
"646652_digital-bullpen-ep-5-building-a-digital": 589274,
"646661_sunlight": 591076,
"646661_grasp-lab-nasa-open-mct-series": 589414,
"646663_bunnula-s-creepers-tim-pool-s-beanie-a": 599669,
"646663_bunnula-music-hey-ya-by-outkast": 605685,
"646663_bunnula-tv-s-music-television-eunoia": 644437,
"646663_the-pussy-centipede-40-sneakers-and": 587265,
"646663_bunnula-reacts-ashton-titty-whitty": 596988,
"646677_filip-reviews-jeromes-dream-cataracts-so": 589751,
"646691_fascism-and-its-mobilizing-passions": 464342,
"646692_hsb-color-layers-action-for-adobe": 586533,
"646692_master-colorist-action-pack-extracting": 631830,
"646693_how-to-protect-your-garden-from-animals": 588476,
"646693_gardening-for-the-apocalypse-epic": 588472,
"646693_my-first-bee-hive-foundationless-natural": 588469,
"646693_dragon-fruit-and-passion-fruit-planting": 588470,
"646693_installing-my-first-foundationless": 588469,
"646705_first-naza-fpv": 590411,
"646717_first-burning-man-2019-detour-034": 630247,
"646717_why-bob-marley-was-an-idiot-test-driving": 477558,
"646717_we-are-addicted-to-gambling-ufc-207-w": 481398,
"646717_ghetto-swap-meet-selling-storage-lockers": 498291,
"646738_1-kings-chapter-7-summary-and-what-god": 586599,
"646814_brand-spanking-new-junior-high-school": 592378,
"646814_lupe-fiasco-freestyle-at-end-of-the-weak": 639535,
"646824_how-to-one-stroke-painting-doodles-mixed": 592404,
"646824_acrylic-pouring-landscape-with-a-tree": 592404,
"646824_how-to-make-a-diy-concrete-paste-planter": 595976,
"646824_how-to-make-a-rustic-sand-planter-sand": 592404,
"646833_3-day-festival-at-the-galilee-lake-and": 592842,
"646833_rainbow-circle-around-the-noon-sun-above": 592842,
"646833_energetic-self-control-demonstration": 623811,
"646833_bees-congregating": 592842,
"646856_formula-offroad-honefoss-sunday-track2": 592872,
"646862_h3video1-dc-vs-mb-1": 593237,
"646862_h3video1-iwasgoingto-load-up-gmod-but": 593237,
"646883_watch-this-game-developer-make-a-video": 592593,
"646883_how-to-write-secure-javascript": 592593,
"646883_blockchain-technology-explained-2-hour": 592593,
"646888_fl-studio-bits": 608155,
"646914_andy-s-shed-live-s03e02-the-longest": 592200,
"646914_gpo-telephone-776-phone-restoration": 592201,
"646916_toxic-studios-co-stream-pubg": 597126,
"646916_hyperlapse-of-prague-praha-from-inside": 597109,
"646933_videobits-1": 597378,
"646933_clouds-developing-daytime-8": 597378,
"646933_slechtvalk-in-watertoren-bodegraven": 597378,
"646933_timelapse-maansverduistering-16-juli": 605880,
"646933_startrails-27": 597378,
"646933_passing-clouds-daytime-3": 597378,
"646940_nerdgasm-unboxing-massive-playing-cards": 597421,
"646946_debunking-cops-volume-3-the-murder-of": 630570,
"646961_kingsong-ks16x-electric-unicycle-250km": 636725,
"646968_wild-mountain-goats-amazing-rock": 621940,
"646968_no-shelter-backcountry-camping-in": 621940,
"646968_can-i-live-in-this-through-winter-lets": 645750,
"646968_why-i-wear-a-chest-rig-backcountry-or": 621940,
"646989_marc-ivan-o-gorman-promo-producer-editor": 645656,
"647045_@moraltis": 646367,
"647045_moraltis-twitch-highlights-first-edit": 646368,
"647075_the-3-massive-tinder-convo-mistakes": 629464,
"647075_how-to-get-friend-zoned-via-text": 592298,
"647075_don-t-do-this-on-tinder": 624591,
"647322_world-of-tanks-7-kills": 609905,
"647322_the-tier-6-auto-loading-swedish-meatball": 591338,
"647416_hypnotic-soundscapes-garden-of-the": 596923,
"647416_hypnotic-soundscapes-the-cauldron-sacred": 596928,
"647416_schumann-resonance-to-theta-sweep": 596920,
"647416_conversational-indirect-hypnosis-why": 596913,
"647493_mimirs-brunnr": 590498,
"648143_live-ita-completiamo-the-evil-within-2": 646568,
"648203_why-we-love-people-that-hurt-us": 591128,
"648203_i-didn-t-like-my-baby-and-considered": 591128,
"648220_trade-talk-001-i-m-a-vlogger-now-fielder": 597303,
"648220_vise-restoration-record-no-6-vise": 597303,
"648540_amv-reign": 571863,
"648540_amv-virus": 571863,
"648588_audial-drift-(a-journey-into-sound)": 630217,
"648616_quick-zbrush-tip-transpose-master-scale": 463205,
"648616_how-to-create-3d-horns-maya-to-zbrush-2": 463205,
"648815_arduino-based-cartridge-game-handheld": 593252,
"648815_a-maze-update-3-new-game-modes-amazing": 593252,
"649209_denmark-trip": 591428,
"649209_stunning-4k-drone-footage": 591428,
"649215_how-to-create-a-channel-and-publish-a": 414908,
"649215_lbryclass-11-how-to-get-your-deposit": 632420,
"649543_spring-break-madness-at-universal": 599698,
"649921_navegador-brave-navegador-da-web-seguro": 649261,
"650191_stream-intro": 591301,
"650946_platelet-chan-fan-art": 584601,
"650946_aqua-fanart": 584601,
"650946_virginmedia-stores-password-in-plain": 619537,
"650946_running-linux-on-android-teaser": 604441,
"650946_hatsune-miku-ievan-polka": 600126,
"650946_digital-security-and-privacy-2-and-a-new": 600135,
"650993_my-editorial-comment-on-recent-youtube": 590305,
"650993_drive-7-18-2018": 590305,
"651011_old-world-put-on-realm-realms-gg": 591899,
"651011_make-your-own-soundboard-with-autohotkey": 591899,
"651011_ark-survival-https-discord-gg-ad26xa": 637680,
"651011_minecraft-featuring-seus-8-just-came-4": 596488,
"651057_found-footage-bikinis-at-the-beach-with": 593586,
"651057_found-footage-sexy-mom-a-mink-stole": 593586,
"651067_who-are-the-gentiles-gomer": 597094,
"651067_take-back-the-kingdom-ep-2-450-million": 597094,
"651067_mmxtac-implemented-footstep-sounds-and": 597094,
"651067_dynasoul-s-blender-to-unreal-animated": 597094,
"651103_calling-a-scammer-syntax-error": 612532,
"651103_quick-highlight-of-my-day": 647651,
"651103_calling-scammers-and-singing-christmas": 612531,
"651109_@livingtzm": 637322,
"651109_living-tzm-juuso-from-finland-september": 643412,
"651373_se-voc-rir-ou-sorrir-reinicie-o-v-deo": 649302,
"651476_what-is-pagan-online-polished-new-arpg": 592157,
"651476_must-have-elder-scrolls-online-addons": 592156,
"651476_who-should-play-albion-online": 592156,
"651730_person-detection-with-keras-tensorflow": 621276,
"651730_youtube-censorship-take-two": 587249,
"651730_new-red-tail-shark-and-two-silver-sharks": 587251,
"651730_around-auckland": 587250,
"651730_humanism-in-islam": 587250,
"651730_tigers-at-auckland-zoo": 587250,
"651730_gravity-demonstration": 587250,
"651730_copyright-question": 587249,
"651730_uberg33k-the-ultimate-software-developer": 599522,
"651730_chl-e-swarbrick-auckland-mayoral": 587250,
"651730_code-reviews": 587249,
"651730_raising-robots": 587251,
"651730_teaching-python": 587250,
"651730_kelly-tarlton-2016": 587250,
"652172_where-is-everything": 589491,
"652172_some-guy-and-his-camera": 617062,
"652172_practical-information-pt-1": 589491,
"652172_latent-vibrations": 589491,
"652172_maldek-compilation": 589491,
"652444_thank-you-etika-thank-you-desmond": 652121,
"652611_plants-vs-zombies-gw2-20190827183609": 624339,
"652611_wolfenstein-the-new-order-playthrough-6": 650299,
"652887_a-codeigniter-cms-open-source-download": 652737,
"652966_@pokesadventures": 632391,
"653009_flat-earth-uk-convention-is-a-bust": 585786,
"653009_flat-earth-reset-flat-earth-money-tree": 585786,
"653011_veil-of-thorns-dispirit-brutal-leech-3": 652475,
"653069_being-born-after-9-11": 632218,
"653069_8-years-on-youtube-what-it-has-done-for": 637130,
"653069_answering-questions-how-original": 521447,
"653069_talking-about-my-first-comedy-stand-up": 583450,
"653069_doing-push-ups-in-public": 650920,
"653069_vlog-extra": 465997,
"653069_crying-myself": 465997,
"653069_xbox-rejection": 465992,
"653354_msps-how-to-find-a-linux-job-where-no": 642537,
"653354_windows-is-better-than-linux-vlog-it-and": 646306,
"653354_luke-smith-is-wrong-about-everything": 507717,
"653354_advice-for-those-starting-out-in-tech": 612452,
"653354_treating-yourself-to-make-studying-more": 623561,
"653354_lpi-linux-essential-dns-tools-vlog-what": 559464,
"653354_is-learning-linux-worth-it-in-2019-vlog": 570886,
"653354_huawei-linux-and-cellphones-in-2019-vlog": 578501,
"653354_how-to-use-webmin-to-manage-linux": 511507,
"653354_latency-concurrency-and-the-best-value": 596857,
"653354_how-to-use-the-pomodoro-method-in-it": 506632,
"653354_negotiating-compensation-vlog-it-and": 542317,
"653354_procedural-goals-vs-outcome-goals-vlog": 626785,
"653354_intro-to-raid-understanding-how-raid": 529341,
"653354_smokeping": 574693,
"653354_richard-stallman-should-not-be-fired": 634928,
"653354_unusual-or-specialty-certifications-vlog": 620146,
"653354_gratitude-and-small-projects-vlog-it": 564900,
"653354_why-linux-on-the-smartphone-is-important": 649543,
"653354_opportunity-costs-vlog-it-devops-career": 549708,
"653354_double-giveaway-lpi-class-dates-and": 608129,
"653354_linux-on-the-smartphone-in-2019-librem": 530426,
"653524_celtic-folk-music-full-live-concert-mps": 589762,
"653745_aftermath-of-the-mac": 592768,
"653745_b-c-a-glock-17-threaded-barrel": 592770,
"653800_middle-earth-shadow-of-mordor-by": 590229,
"654079_tomand-jeremy-chirs45": 614296,
"654096_achamos-carteira-com-grana-olha-o-que": 466262,
"654096_viagem-bizarra-e-cansativa-ao-nordeste": 466263,
"654096_tedio-na-tailandia-limpeza-de-area": 466265,
"654425_schau-bung-2014-in-windischgarsten": 654410,
"654425_mitternachtseinlage-ball-rk": 654410,
"654425_zugabe-ball-rk-windischgarsten": 654412,
"654722_skytrain-in-korea": 463145,
"654722_luwak-coffee-the-shit-coffee": 463155,
"654722_puppet-show-in-bangkok-thailand": 462812,
"654722_kyaito-market-myanmar": 462813,
"654724_wipeout-zombies-bo3-custom-zombies-1st": 589569,
"654724_the-street-bo3-custom-zombies": 589544,
"654880_wwii-airsoft-pow": 586968,
"654880_dueling-geese-fight-to-the-death": 586968,
"654880_wwii-airsoft-torgau-raw-footage-part4": 586968,
"655173_april-2019-q-and-a": 554032,
"655173_the-meaning-and-reality-of-individual": 607892,
"655173_steven-pinker-progress-despite": 616984,
"655173_we-make-stories-out-of-totem-poles": 549090,
"655173_jamil-jivani-author-of-why-young-men": 542035,
"655173_commentaries-on-jb-peterson-rebel-wisdom": 528898,
"655173_auckland-clip-4-on-cain-and-abel": 629242,
"655173_peterson-vs-zizek-livestream-tickets": 545285,
"655173_auckland-clip-3-the-dawning-of-the-moral": 621154,
"655173_religious-belief-and-the-enlightenment": 606269,
"655173_auckland-lc-highlight-1-the-presumption": 565783,
"655173_q-a-sir-roger-scruton-dr-jordan-b": 544184,
"655173_cancellation-polish-national-foundation": 562529,
"655173_the-coddling-of-the-american-mind-haidt": 440185,
"655173_02-harris-weinstein-peterson-discussion": 430896,
"655173_jordan-peterson-threatens-everything-of": 519737,
"655173_on-claiming-belief-in-god-commentary": 581738,
"655173_how-to-make-the-world-better-really-with": 482317,
"655173_quillette-discussion-with-founder-editor": 413749,
"655173_jb-peterson-on-free-thought-and-speech": 462849,
"655173_marxism-zizek-peterson-official-video": 578453,
"655173_patreon-problem-solution-dave-rubin-dr": 490394,
"655173_next-week-st-louis-salt-lake-city": 445933,
"655173_conversations-with-john-anderson-jordan": 529981,
"655173_nz-australia-12-rules-tour-next-2-weeks": 518649,
"655173_a-call-to-rebellion-for-ontario-legal": 285451,
"655173_2016-personality-lecture-12": 578465,
"655173_on-the-vital-necessity-of-free-speech": 427404,
"655173_2017-01-23-social-justice-freedom-of": 578465,
"655173_discussion-sam-harris-the-idw-and-the": 423332,
"655173_march-2018-patreon-q-a": 413749,
"655173_take-aim-even-badly": 490395,
"655173_jp-f-wwbgo6a2w": 539940,
"655173_patreon-account-deletion": 503477,
"655173_canada-us-europe-tour-august-dec-2018": 413749,
"655173_leaders-myth-reality-general-stanley": 514333,
"655173_jp-ifi5kkxig3s": 539940,
"655173_documentary-a-glitch-in-the-matrix-david": 413749,
"655173_2017-08-14-patreon-q-and-a": 285451,
"655173_postmodernism-history-and-diagnosis": 285451,
"655173_23-minutes-from-maps-of-meaning-the": 413749,
"655173_milo-forbidden-conversation": 578493,
"655173_jp-wnjbasba-qw": 539940,
"655173_uk-12-rules-tour-october-and-november": 462849,
"655173_2015-maps-of-meaning-10-culture-anomaly": 578465,
"655173_ayaan-hirsi-ali-islam-mecca-vs-medina": 285452,
"655173_jp-f9393el2z1i": 539940,
"655173_campus-indoctrination-the-parasitization": 285453,
"655173_jp-owgc63khcl8": 539940,
"655173_the-death-and-resurrection-of-christ-a": 413749,
"655173_01-harris-weinstein-peterson-discussion": 430896,
"655173_enlightenment-now-steven-pinker-jb": 413749,
"655173_the-lindsay-shepherd-affair-update": 413749,
"655173_jp-g3fwumq5k8i": 539940,
"655173_jp-evvs3l-abv4": 539940,
"655173_former-australian-deputy-pm-john": 413750,
"655173_message-to-my-korean-readers-90-seconds": 477424,
"655173_jp--0xbomwjkgm": 539940,
"655173_ben-shapiro-jordan-peterson-and-a-12": 413749,
"655173_jp-91jwsb7zyhw": 539940,
"655173_deconstruction-the-lindsay-shepherd": 299272,
"655173_september-patreon-q-a": 285451,
"655173_jp-2c3m0tt5kce": 539940,
"655173_australia-s-john-anderson-dr-jordan-b": 413749,
"655173_jp-hdrlq7dpiws": 539940,
"655173_stephen-hicks-postmodernism-reprise": 578480,
"655173_october-patreon-q-a": 285451,
"655173_an-animated-intro-to-truth-order-and": 413749,
"655173_jp-bsh37-x5rny": 539940,
"655173_january-2019-q-a": 503477,
"655173_comedians-canaries-and-coalmines": 498586,
"655173_the-democrats-apology-and-promise": 465433,
"655173_jp-s4c-jodptn8": 539940,
"655173_2014-personality-lecture-16-extraversion": 578465,
"655173_dr-jordan-b-peterson-on-femsplainers": 490395,
"655173_higher-ed-our-cultural-inflection-point": 527291,
"655173_archetype-reality-friendship-and": 519736,
"655173_sir-roger-scruton-dr-jordan-b-peterson": 490395,
"655173_jp-cf2nqmqifxc": 539940,
"655173_penguin-uk-12-rules-for-life": 413749,
"655173_march-2019-q-and-a": 537138,
"655173_jp-ne5vbomsqjc": 539940,
"655173_dublin-london-harris-murray-new-usa-12": 413749,
"655173_12-rules-12-cities-tickets-now-available": 413749,
"655173_jp-j9j-bvdrgdi": 539940,
"655173_responsibility-conscience-and-meaning": 499123,
"655173_04-harris-murray-peterson-discussion": 436678,
"655173_jp-ayhaz9k008q": 539940,
"655173_with-jocko-willink-the-catastrophe-of": 490395,
"655173_interview-with-the-grievance-studies": 501296,
"655173_russell-brand-jordan-b-peterson-under": 413750,
"655173_goodbye-to-patreon": 496771,
"655173_revamped-podcast-announcement-with": 540943,
"655173_swedes-want-to-know": 285453,
"655173_auckland-clip-2-the-four-fundamental": 607892,
"655173_jp-dtirzqmgbdm": 539940,
"655173_political-correctness-a-force-for-good-a": 413750,
"655173_sean-plunket-full-interview-new-zealand": 597638,
"655173_q-a-the-meaning-and-reality-of": 616984,
"655173_lecture-and-q-a-with-jordan-peterson-the": 413749,
"655173_2017-personality-07-carl-jung-and-the": 578465,
"655173_nina-paley-animator-extraordinaire": 413750,
"655173_truth-as-the-antidote-to-suffering-with": 455127,
"655173_bishop-barron-word-on-fire": 599814,
"655173_zizek-vs-peterson-april-19": 527291,
"655173_revamped-podcast-with-westwood-one": 540943,
"655173_2016-11-19-university-of-toronto-free": 578465,
"655173_jp-1emrmtrj5jc": 539940,
"655173_who-is-joe-rogan-with-jordan-peterson": 585578,
"655173_who-dares-say-he-believes-in-god": 581738,
"655252_games-with-live2d": 589978,
"655252_kaenbyou-rin-live2d": 589978,
"655374_steam-groups-are-crazy": 607590,
"655379_asmr-captain-falcon-happily-beats-you-up": 644574,
"655379_pixel-art-series-5-link-holding-the": 442952,
"655379_who-can-cross-the-planck-length-the-hero": 610830,
"655379_ssbb-the-yoshi-grab-release-crash": 609747,
"655379_tas-captain-falcon-s-bizarre-adventure": 442958,
"655379_super-smash-bros-in-360-test": 442963,
"655379_what-if-luigi-was-b-u-f-f": 442971,
"655803_sun-time-lapse-test-7": 610634,
"655952_upper-build-complete": 591728,
"656758_cryptocurrency-awareness-adoption-the": 541770,
"656829_3d-printing-technologies-comparison": 462685,
"656829_3d-printing-for-everyone": 462685,
"657052_tni-punya-ilmu-kanuragan-gaya-baru": 657045,
"657052_papa-sunimah-nelpon-sri-utami-emon": 657045,
"657274_rapforlife-4-win": 656856,
"657274_bizzilion-proof-of-withdrawal": 656856,
"657420_quick-drawing-prince-tribute-colored": 605630,
"657453_white-boy-tom-mcdonald-facts": 597169,
"657453_is-it-ok-to-look-when-you-with-your-girl": 610508,
"657584_need-for-speed-ryzen-5-1600-gtx-1050-ti": 657161,
"657584_quantum-break-ryzen-5-1600-gtx-1050-ti-4": 657161,
"657584_nightcore-legends-never-die": 657161,
"657706_mtb-enduro-ferragosto-2019-sestri": 638904,
"657706_warface-free-for-all": 638908,
"657782_nick-warren-at-loveland-but-not-really": 444299,
"658098_le-temps-nous-glisse-entre-les-doigts": 600099,
}
}
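
Each key fuses a block height and a claim name as "height_name"; the values appear to be nearby earlier heights but are unused (see the TODO above), so only a key's presence matters. A sketch of the lookup shape (hypothetical code, using fmt for key construction):

    key := fmt.Sprintf("%d_%s", height, name) // e.g. "496856_HunterxHunterAMV"
    if _, ok := TakeoverWorkarounds[key]; ok {
        // apply the legacy takeover behavior for this name at this height
    }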

View file

@ -0,0 +1,9 @@
package temporal

// Repo defines APIs for Temporal to access persistence layer.
type Repo interface {
SetNodesAt(names [][]byte, heights []int32) error
NodesAt(height int32) ([][]byte, error)
Close() error
Flush() error
}

View file

@ -0,0 +1,45 @@
package temporalrepo

type Memory struct {
cache map[int32]map[string]bool
}

func NewMemory() *Memory {
return &Memory{
cache: map[int32]map[string]bool{},
}
}
func (repo *Memory) SetNodesAt(names [][]byte, heights []int32) error {
for i, height := range heights {
c, ok := repo.cache[height]
if !ok {
c = map[string]bool{}
repo.cache[height] = c
}
name := string(names[i])
c[name] = true
}
return nil
}
func (repo *Memory) NodesAt(height int32) ([][]byte, error) {
var names [][]byte
for name := range repo.cache[height] {
names = append(names, []byte(name))
}
return names, nil
}
func (repo *Memory) Close() error {
return nil
}
func (repo *Memory) Flush() error {
return nil
}

View file

@ -0,0 +1,87 @@
package temporalrepo

import (
"bytes"
"encoding/binary"

"github.com/pkg/errors"

"github.com/cockroachdb/pebble"
)

type Pebble struct {
db *pebble.DB
}

func NewPebble(path string) (*Pebble, error) {
db, err := pebble.Open(path, &pebble.Options{Cache: pebble.NewCache(16 << 20), MaxOpenFiles: 2000})
repo := &Pebble{db: db}
return repo, errors.Wrapf(err, "unable to open %s", path)
}
func (repo *Pebble) SetNodesAt(names [][]byte, heights []int32) error {
// key format: height(4B) + 0(1B) + name(variable length)
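// e.g. height 5 with name "a" encodes to key bytes 00 00 00 05 00 61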
key := bytes.NewBuffer(nil)
batch := repo.db.NewBatch()
defer batch.Close()
for i, name := range names {
key.Reset()
binary.Write(key, binary.BigEndian, heights[i])
binary.Write(key, binary.BigEndian, byte(0))
key.Write(name)
err := batch.Set(key.Bytes(), nil, pebble.NoSync)
if err != nil {
return errors.Wrap(err, "in set")
}
}
return errors.Wrap(batch.Commit(pebble.NoSync), "in commit")
}
func (repo *Pebble) NodesAt(height int32) ([][]byte, error) {
prefix := bytes.NewBuffer(nil)
binary.Write(prefix, binary.BigEndian, height)
binary.Write(prefix, binary.BigEndian, byte(0))
end := bytes.NewBuffer(nil)
binary.Write(end, binary.BigEndian, height)
binary.Write(end, binary.BigEndian, byte(1))
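// Bumping the separator byte from 0 to 1 produces the smallest key greater
// than every "height + 0x00 + name" key, giving an exclusive upper bound
// for the scan below.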
prefixIterOptions := &pebble.IterOptions{
LowerBound: prefix.Bytes(),
UpperBound: end.Bytes(),
}
var names [][]byte
iter := repo.db.NewIter(prefixIterOptions)
for iter.First(); iter.Valid(); iter.Next() {
// Skipping the first 5 bytes (height and a null byte), we get the name.
name := make([]byte, len(iter.Key())-5)
copy(name, iter.Key()[5:]) // iter.Key() reuses its buffer
names = append(names, name)
}
return names, errors.Wrap(iter.Close(), "in close")
}
func (repo *Pebble) Close() error {
err := repo.db.Flush()
if err != nil {
// if we fail to close are we going to try again later?
return errors.Wrap(err, "on flush")
}
err = repo.db.Close()
return errors.Wrap(err, "on close")
}
func (repo *Pebble) Flush() error {
_, err := repo.db.AsyncFlush()
return err
}

View file

@ -0,0 +1,80 @@
package temporalrepo

import (
"testing"

"github.com/lbryio/lbcd/claimtrie/temporal"
"github.com/stretchr/testify/require"
)
func TestMemory(t *testing.T) {
repo := NewMemory()
testTemporalRepo(t, repo)
}
func TestPebble(t *testing.T) {
repo, err := NewPebble(t.TempDir())
require.NoError(t, err)
testTemporalRepo(t, repo)
}
func testTemporalRepo(t *testing.T, repo temporal.Repo) {
r := require.New(t)
nameA := []byte("a")
nameB := []byte("b")
nameC := []byte("c")
testcases := []struct {
name []byte
heights []int32
}{
{nameA, []int32{1, 3, 2}},
{nameA, []int32{2, 3}},
{nameB, []int32{5, 4}},
{nameB, []int32{5, 1}},
{nameC, []int32{4, 3, 8}},
}
for _, i := range testcases {
names := make([][]byte, 0, len(i.heights))
for range i.heights {
names = append(names, i.name)
}
err := repo.SetNodesAt(names, i.heights)
r.NoError(err)
}
// a: 1, 2, 3
// b: 1, 5, 4
// c: 4, 3, 8
names, err := repo.NodesAt(2)
r.NoError(err)
r.ElementsMatch([][]byte{nameA}, names)
names, err = repo.NodesAt(5)
r.NoError(err)
r.ElementsMatch([][]byte{nameB}, names)
names, err = repo.NodesAt(8)
r.NoError(err)
r.ElementsMatch([][]byte{nameC}, names)
names, err = repo.NodesAt(1)
r.NoError(err)
r.ElementsMatch([][]byte{nameA, nameB}, names)
names, err = repo.NodesAt(4)
r.NoError(err)
r.ElementsMatch([][]byte{nameB, nameC}, names)
names, err = repo.NodesAt(3)
r.NoError(err)
r.ElementsMatch([][]byte{nameA, nameC}, names)
}