claimtrie/nodemgr/nm.go

wip: a few updates so far (the code is not cleaned up yet, especially the DB-related parts).

1. Separate the claim nodes from the Trie into NodeMgr (Node Manager).

   The Trie is mainly responsible for resolving the MerkleHash. The Node Manager, which manages all of the claim nodes, implements the KeyValue interface:

       type KeyValue interface {
           Get(Key) (Value, error)
           Set(Key, Value) error
       }

   When the Trie traverses to a value node, it consults the KV with the prefix to get the value, which is the Hash of the best claim.

2. Versioned / snapshot-based / copy-on-write Merkle Trie.

   Every resolved trie node is saved to the TrieDB (leveldb) with its Hash as the Key and its content as the Value. The content has the following format (see the first sketch below):

       Char (1B) Hash (32B)    0 to 256 entries
       VHash (32B)             0 or 1 entry

   The nodes are immutable and content (hash) addressable, which gives de-duplication for free.

3. The NodeManager implements Replay and can reconstruct any past state.

   After experimenting with Memento vs. Replay on the real mainnet dataset, I decided to go with Replay (at least for now) for a few reasons (see the second sketch below):

   a. Concurrency and usability.

      In the real-world scenario, the ClaimTrie always works on the tip of the chain: it accepts claim scripts, updates its own state, and generates the Hash. Most client requests, on the other hand, ask about a past state with some minimal number of confirmations. With Memento, the ClaimTrie has to either

      - pin down the node, and likely the ClaimTrie itself as well, since it no longer has the latest state (in terms of the whole Trie) to resolve the Hash; undo the changes, and redo them after serving the request, or
      - copy the current state of the node and roll the copy back to serve the request in the background.

      With Replay, the ClaimTrie can simply spin off a background task without any pause. The history of the nodes is immutable and read-only, so there is no contention in reconstructing a node.

   b. Negligible performance difference.

      Most nodes have only a few commands to play back. The playback time is negligible and is dominated by I/O if the node was flushed to disk.

   c. Simplicity.

      Implementing undo means saving more intermediate state along the way and paying much closer attention to the bidding rules.
2018-08-03 07:15:08 +02:00
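
Two sketches below make points 2 and 3 of the commit message concrete. Both use hypothetical names and types chosen for illustration; they are not code from the claimtrie, claim, or trie packages, and the real layout and hash function may differ.

The first sketch shows the TrieDB value layout described in point 2: a resolved trie node is serialized as zero to 256 (child character, child hash) pairs followed by an optional value hash, and is stored under the hash of that serialized content.

	// Hypothetical types; the trie package's real node representation and
	// hash function may differ.
	type hash [32]byte

	type diskNode struct {
		links map[byte]hash // child links: 0 to 256 entries
		vhash *hash         // hash of the best claim at this prefix: 0 or 1 entry
	}

	// marshal lays the node out as Char (1B) + Hash (32B) per child in byte
	// order, then VHash (32B) only if the node carries a value. Storing the
	// node under the hash of this content means identical subtrees share a
	// single record, which is the free de-duplication mentioned above.
	func (n diskNode) marshal() []byte {
		buf := make([]byte, 0, len(n.links)*33+32)
		for c := 0; c <= 255; c++ {
			if h, ok := n.links[byte(c)]; ok {
				buf = append(buf, byte(c))
				buf = append(buf, h[:]...)
			}
		}
		if n.vhash != nil {
			buf = append(buf, n.vhash[:]...)
		}
		return buf
	}

The second sketch illustrates the replay approach from point 3: each node keeps an append-only list of commands ordered by height, and any past state is reconstructed by replaying the prefix of that list up to the requested height, which read-only background tasks can do without locking.

	// Hypothetical command log for a single name; claim.Node's real internals
	// are not shown in this file.
	type command struct {
		height int                    // height at which the command takes effect
		apply  func(active *[]string) // e.g. accept, support, or spend a claim
	}

	// replayTo rebuilds a node's state at height h from its history. The
	// history is immutable, so concurrent replays need no synchronization.
	func replayTo(history []command, h int) []string {
		var active []string
		for _, cmd := range history {
			if cmd.height > h {
				break // commands are ordered by height
			}
			cmd.apply(&active)
		}
		return active
	}
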
package nodemgr

import (
	"fmt"
	"sort"
	"sync"

	"github.com/lbryio/claimtrie/claim"
	"github.com/lbryio/claimtrie/trie"
	"github.com/syndtr/goleveldb/leveldb"
)

// NodeMgr manages all claim nodes and serves as the KeyValue store that the
// Trie consults when it reaches a value node.
type NodeMgr struct {
	sync.RWMutex

	db          *leveldb.DB            // backing LevelDB; may be nil
	nodes       map[string]*claim.Node // in-memory cache of loaded nodes
	dirty       map[string]bool        // names modified since the last flush to disk
	nextUpdates todos                  // names scheduled for update, keyed by height
}

// New returns a NodeMgr backed by db, which may be nil for a purely
// in-memory manager.
func New(db *leveldb.DB) *NodeMgr {
	nm := &NodeMgr{
		db:          db,
		nodes:       map[string]*claim.Node{},
		dirty:       map[string]bool{},
		nextUpdates: todos{},
	}
	return nm
}

// Get returns the node named by key. It checks the in-memory cache first,
// then the backing DB; if the node cannot be loaded, a fresh one is created.
func (nm *NodeMgr) Get(key trie.Key) (trie.Value, error) {
	nm.Lock()
	defer nm.Unlock()
	if n, ok := nm.nodes[string(key)]; ok {
		return n, nil
	}
	if nm.db != nil {
		b, err := nm.db.Get(key, nil)
		if err == nil {
			_ = b // TODO: Loaded. Deserialize it.
		} else if err != leveldb.ErrNotFound {
			// DB error. Propagate it.
			return nil, err
		}
	}
	// Not cached, and deserialization from the DB is not implemented yet,
	// so start with a fresh node.
	n := claim.NewNode(string(key))
	nm.nodes[string(key)] = n
	return n, nil
}

// Set caches the node for key and marks it dirty for a later flush to disk.
func (nm *NodeMgr) Set(key trie.Key, val trie.Value) {
	n := val.(*claim.Node)
	nm.Lock()
	defer nm.Unlock()
	nm.nodes[string(key)] = n
	nm.dirty[string(key)] = true
	// TODO: flush to disk.
}

// Reset resets all cached nodes to the specified height.
func (nm *NodeMgr) Reset(h claim.Height) error {
	for _, n := range nm.nodes {
		if err := n.Reset(h); err != nil {
			return err
		}
	}
	return nil
}

// NodeAt returns the node for name adjusted to the specified height.
func (nm *NodeMgr) NodeAt(name string, h claim.Height) (*claim.Node, error) {
	v, err := nm.Get(trie.Key(name))
	if err != nil {
		return nil, err
	}
	n := v.(*claim.Node)
	if err = n.AdjustTo(h); err != nil {
		return nil, err
	}
	return n, nil
}

// ModifyNode applies modifier to the node for name, adjusted to the specified
// height, and schedules the node for an update at the following height.
func (nm *NodeMgr) ModifyNode(name string, h claim.Height, modifier func(*claim.Node) error) error {
	n, err := nm.NodeAt(name, h)
	if err != nil {
		return err
	}
	if err = modifier(n); err != nil {
		return err
	}
	nm.nextUpdates.set(name, h+1)
	return nil
}

// CatchUp brings every node scheduled at height h up to date, invokes
// notifier with its key, and re-schedules the node if it has a further
// update pending.
func (nm *NodeMgr) CatchUp(h claim.Height, notifier func(key trie.Key) error) error {
	for name := range nm.nextUpdates[h] {
		n, err := nm.NodeAt(name, h)
		if err != nil {
			return err
		}
		if err = notifier(trie.Key(name)); err != nil {
			return err
		}
		if next := n.NextUpdate(); next > h {
			nm.nextUpdates.set(name, next)
		}
	}
	return nil
}
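
// Usage sketch (illustrative only; the claim.Node method and the trie update
// call below are hypothetical assumptions, not APIs confirmed by this file):
// how a caller might drive NodeMgr while processing the block at height h.
//
//	// For each claim script found in the block:
//	err := nm.ModifyNode(name, h, func(n *claim.Node) error {
//		return n.AddClaim(op) // hypothetical claim.Node method
//	})
//
//	// After the whole block is applied, process the scheduled nodes; the
//	// notifier presumably lets the caller refresh the affected keys in the
//	// Trie before the block hash is resolved.
//	err = nm.CatchUp(h, func(k trie.Key) error {
//		return tr.Update(k) // hypothetical trie method
//	})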

// Show prints the node for name, or every cached node in sorted order when
// name is empty.
func (nm *NodeMgr) Show(name string) error {
	if len(name) != 0 {
		fmt.Printf("[%s] %s\n", name, nm.nodes[name])
		return nil
	}
	names := []string{}
	for name := range nm.nodes {
		names = append(names, name)
	}
	sort.Strings(names)
	for _, name := range names {
		fmt.Printf("[%s] %s\n", name, nm.nodes[name])
	}
	return nil
}

// UpdateAll invokes m on the key of every cached node.
func (nm *NodeMgr) UpdateAll(m func(key trie.Key) error) error {
	for name := range nm.nodes {
		if err := m(trie.Key(name)); err != nil {
			return err
		}
	}
	return nil
}

// todos maps a height to the set of node names scheduled to be updated at
// that height.
type todos map[claim.Height]map[string]bool

// set schedules name for an update at height h.
func (t todos) set(name string, h claim.Height) {
	if t[h] == nil {
		t[h] = map[string]bool{}
	}
	t[h][name] = true
}